Compare commits


15 Commits

Author SHA1 Message Date
Waleed
5d887fdca7 v0.4.10: ts-sdk updates, db updates 2025-10-08 00:56:04 -07:00
Waleed
1a0fdb32fe chore(docs): update docs (#1578) 2025-10-08 00:55:18 -07:00
Waleed
9d45b8df1e fix(ts-sdk): fix job to publish ts sdk (#1576) 2025-10-08 00:46:30 -07:00
Waleed
ae3a7f0865 fix(db): reduce overall number of db max conncetions to incr performance (#1575) 2025-10-08 00:30:44 -07:00
Vikhyath Mondreti
25f5e31378 v0.4.9: chat streaming API, DB config changes, sockets improvement, var highlighting, bug fixes 2025-10-07 19:24:37 -07:00
Waleed
7bdf0e94d7 fix(curl-example): fixed curl example in deploy modal to reflect selected option (#1573) 2025-10-07 18:46:12 -07:00
Vikhyath Mondreti
8e43774b5e improvement(sockets): position persistence on drag end, perms call only on joining room (#1571) 2025-10-07 17:31:02 -07:00
Waleed
715f42c1a6 feat(highlighting): added resolved vars highlighting to code subblock, to be consistent with other subblocks (#1570) 2025-10-07 17:17:39 -07:00
Waleed
8200e9a88f feat(i18n): update translations (#1569)
* feat(i18n): update translations

* remove duplicate sections

* fix typos
2025-10-07 16:50:14 -07:00
Waleed
c6f6c9e2a5 fix(streaming-response): add in handling for the response block when streaming (#1568) 2025-10-07 16:21:10 -07:00
Vikhyath Mondreti
2d7ba91c0e fix(workspace-selector-kb): fix selector for assigning workspaces for kbs (#1567) 2025-10-07 15:31:04 -07:00
Waleed
872e034312 feat(chat-streaming): added a stream option to workflow execute route, updated SDKs, updated docs (#1565)
* feat(chat-stream): updated workflow id execute route to support streaming via API

* enable streaming via api

* added only text stream option

* cleanup deployed preview componnet

* updated selectedOutputIds to selectedOutput

* updated TS and Python SDKs with async, rate limits, usage, and streaming API routes

* stream non-streaming blocks when streaming is specified

* fix(chat-panel): add onBlockComplete handler to chat panel to stream back blocks as they complete

* update docs

* cleanup

* ack PR comments

* updated next config

* removed getAssetUrl in favor of local assets

* resolve merge conflicts

* remove extra logic to create sensitive result

* simplify internal auth

* remove vercel blob from CSP + next config
2025-10-07 15:10:37 -07:00
Vikhyath Mondreti
a63a7b0262 fix(undo-redo): preserve trigger/advanced mode (#1566)
* fix(undo-redo): preserve trigger/advanced mode

* remove comments
2025-10-07 14:27:46 -07:00
Vikhyath Mondreti
991a020917 feat(nested-workflow-spans): nested child workflow spans in logs sidepanel (#1561)
* feat(nested-workflow-logs): nested workflow logs display

* logs UX consistency between success and error cases

* fix chat execution

* fix schedules trigger

* update all deployment versions dependent exections to use api key owner instead of workflow owner

* fix tests

* simplify tests
2025-10-07 12:32:04 -07:00
Waleed
f03f395225 fix(db): enable database connection pooling in production (#1564)
* fix: enable database connection pooling in production

* debug: add diagnostic endpoints to test NODE_ENV and database pooling

* test: add connection testing endpoint to diagnose production delay

* redeuce num of concurrent connections
2025-10-07 10:27:33 -07:00
121 changed files with 9656 additions and 2602 deletions

View File

@@ -84,6 +84,6 @@ jobs:
```
### Documentation
See the [README](https://github.com/simstudio/sim/tree/main/packages/python-sdk) for usage instructions.
See the [README](https://github.com/simstudioai/sim/tree/main/packages/python-sdk) or the [docs](https://docs.sim.ai/sdks/python) for more information.
draft: false
prerelease: false

View File

@@ -25,7 +25,6 @@ jobs:
registry-url: 'https://registry.npmjs.org/'
- name: Install dependencies
working-directory: packages/ts-sdk
run: bun install
- name: Run tests
@@ -80,6 +79,6 @@ jobs:
```
### Documentation
See the [README](https://github.com/simstudio/sim/tree/main/packages/ts-sdk) for usage instructions.
See the [README](https://github.com/simstudioai/sim/tree/main/packages/ts-sdk) or the [docs](https://docs.sim.ai/sdks/typescript) for more information.
draft: false
prerelease: false

View File

@@ -1,7 +1,7 @@
'use client'
import { useEffect, useRef } from 'react'
import { getVideoUrl } from '@/lib/utils'
import { getAssetUrl } from '@/lib/utils'
interface LightboxProps {
isOpen: boolean
@@ -60,7 +60,7 @@ export function Lightbox({ isOpen, onClose, src, alt, type }: LightboxProps) {
/>
) : (
<video
src={getVideoUrl(src)}
src={getAssetUrl(src)}
autoPlay
loop
muted

View File

@@ -1,7 +1,7 @@
'use client'
import { useState } from 'react'
import { getVideoUrl } from '@/lib/utils'
import { getAssetUrl } from '@/lib/utils'
import { Lightbox } from './lightbox'
interface VideoProps {
@@ -39,7 +39,7 @@ export function Video({
muted={muted}
playsInline={playsInline}
className={`${className} ${enableLightbox ? 'cursor-pointer transition-opacity hover:opacity-90' : ''}`}
src={getVideoUrl(src)}
src={getAssetUrl(src)}
onClick={handleVideoClick}
/>

View File

@@ -10,7 +10,7 @@ import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
Das offizielle Python SDK für Sim ermöglicht es Ihnen, Workflows programmatisch aus Ihren Python-Anwendungen auszuführen.
<Callout type="info">
Das Python SDK unterstützt Python 3.8+ und bietet synchrone Workflow-Ausführung. Alle Workflow-Ausführungen sind derzeit synchron.
Das Python SDK unterstützt Python 3.8+ mit asynchroner Ausführungsunterstützung, automatischer Ratenbegrenzung mit exponentiellem Backoff und Nutzungsverfolgung.
</Callout>
## Installation
@@ -74,12 +74,17 @@ result = client.execute_workflow(
- `workflow_id` (str): Die ID des auszuführenden Workflows
- `input_data` (dict, optional): Eingabedaten, die an den Workflow übergeben werden
- `timeout` (float, optional): Timeout in Sekunden (Standard: 30.0)
- `stream` (bool, optional): Streaming-Antworten aktivieren (Standard: False)
- `selected_outputs` (list[str], optional): Block-Ausgaben, die im `blockName.attribute`-Format gestreamt werden sollen (z.B. `["agent1.content"]`)
- `async_execution` (bool, optional): Asynchron ausführen (Standard: False)
**Rückgabewert:** `WorkflowExecutionResult`
**Rückgabe:** `WorkflowExecutionResult | AsyncExecutionResult`
Wenn `async_execution=True`, wird sofort mit einer Task-ID zum Abfragen zurückgegeben. Andernfalls wird auf den Abschluss gewartet.
##### get_workflow_status()
Ruft den Status eines Workflows ab (Deployment-Status usw.).
Den Status eines Workflows abrufen (Bereitstellungsstatus usw.).
```python
status = client.get_workflow_status("workflow-id")
@@ -93,7 +98,7 @@ print("Is deployed:", status.is_deployed)
##### validate_workflow()
Überprüft, ob ein Workflow für die Ausführung bereit ist.
Überprüfen, ob ein Workflow für die Ausführung bereit ist.
```python
is_ready = client.validate_workflow("workflow-id")
@@ -107,28 +112,118 @@ if is_ready:
**Rückgabe:** `bool`
##### execute_workflow_sync()
##### get_job_status()
<Callout type="info">
Derzeit ist diese Methode identisch mit `execute_workflow()`, da alle Ausführungen synchron sind. Diese Methode wird für zukünftige Kompatibilität bereitgestellt, wenn asynchrone Ausführung hinzugefügt wird.
</Callout>
Führt einen Workflow aus (derzeit synchron, identisch mit `execute_workflow()`).
Den Status einer asynchronen Job-Ausführung abrufen.
```python
result = client.execute_workflow_sync(
status = client.get_job_status("task-id-from-async-execution")
print("Status:", status["status"]) # 'queued', 'processing', 'completed', 'failed'
if status["status"] == "completed":
print("Output:", status["output"])
```
**Parameter:**
- `task_id` (str): Die Task-ID, die von der asynchronen Ausführung zurückgegeben wurde
**Rückgabe:** `Dict[str, Any]`
**Antwortfelder:**
- `success` (bool): Ob die Anfrage erfolgreich war
- `taskId` (str): Die Task-ID
- `status` (str): Einer der Werte `'queued'`, `'processing'`, `'completed'`, `'failed'`, `'cancelled'`
- `metadata` (dict): Enthält `startedAt`, `completedAt` und `duration`
- `output` (any, optional): Die Workflow-Ausgabe (wenn abgeschlossen)
- `error` (any, optional): Fehlerdetails (wenn fehlgeschlagen)
- `estimatedDuration` (int, optional): Geschätzte Dauer in Millisekunden (wenn in Bearbeitung/in Warteschlange)
##### execute_with_retry()
Einen Workflow mit automatischer Wiederholung bei Ratenbegrenzungsfehlern unter Verwendung von exponentiellem Backoff ausführen.
```python
result = client.execute_with_retry(
"workflow-id",
input_data={"data": "some input"},
timeout=60.0
input_data={"message": "Hello"},
timeout=30.0,
max_retries=3, # Maximum number of retries
initial_delay=1.0, # Initial delay in seconds
max_delay=30.0, # Maximum delay in seconds
backoff_multiplier=2.0 # Exponential backoff multiplier
)
```
**Parameter:**
- `workflow_id` (str): Die ID des auszuführenden Workflows
- `input_data` (dict, optional): Eingabedaten, die an den Workflow übergeben werden
- `timeout` (float): Timeout für die initiale Anfrage in Sekunden
- `timeout` (float, optional): Timeout in Sekunden
- `stream` (bool, optional): Streaming-Antworten aktivieren
- `selected_outputs` (list, optional): Block-Ausgaben zum Streamen
- `async_execution` (bool, optional): Asynchron ausführen
- `max_retries` (int, optional): Maximale Anzahl von Wiederholungen (Standard: 3)
- `initial_delay` (float, optional): Anfängliche Verzögerung in Sekunden (Standard: 1.0)
- `max_delay` (float, optional): Maximale Verzögerung in Sekunden (Standard: 30.0)
- `backoff_multiplier` (float, optional): Backoff-Multiplikator (Standard: 2.0)
**Rückgabe:** `WorkflowExecutionResult`
**Rückgabewert:** `WorkflowExecutionResult | AsyncExecutionResult`
Die Wiederholungslogik verwendet exponentielles Backoff (1s → 2s → 4s → 8s...) mit ±25% Jitter, um den Thundering-Herd-Effekt zu vermeiden. Wenn die API einen `retry-after` Header bereitstellt, wird dieser stattdessen verwendet.
##### get_rate_limit_info()
Ruft die aktuellen Rate-Limit-Informationen aus der letzten API-Antwort ab.
```python
rate_limit_info = client.get_rate_limit_info()
if rate_limit_info:
print("Limit:", rate_limit_info.limit)
print("Remaining:", rate_limit_info.remaining)
print("Reset:", datetime.fromtimestamp(rate_limit_info.reset))
```
**Rückgabewert:** `RateLimitInfo | None`
##### get_usage_limits()
Ruft aktuelle Nutzungslimits und Kontingentinformationen für dein Konto ab.
```python
limits = client.get_usage_limits()
print("Sync requests remaining:", limits.rate_limit["sync"]["remaining"])
print("Async requests remaining:", limits.rate_limit["async"]["remaining"])
print("Current period cost:", limits.usage["currentPeriodCost"])
print("Plan:", limits.usage["plan"])
```
**Rückgabewert:** `UsageLimits`
**Antwortstruktur:**
```python
{
"success": bool,
"rateLimit": {
"sync": {
"isLimited": bool,
"limit": int,
"remaining": int,
"resetAt": str
},
"async": {
"isLimited": bool,
"limit": int,
"remaining": int,
"resetAt": str
},
"authType": str # 'api' or 'manual'
},
"usage": {
"currentPeriodCost": float,
"limit": float,
"plan": str # e.g., 'free', 'pro'
}
}
```
##### set_api_key()
@@ -170,6 +265,18 @@ class WorkflowExecutionResult:
total_duration: Optional[float] = None
```
### AsyncExecutionResult
```python
@dataclass
class AsyncExecutionResult:
success: bool
task_id: str
status: str # 'queued'
created_at: str
links: Dict[str, str] # e.g., {"status": "/api/jobs/{taskId}"}
```
### WorkflowStatus
```python
@@ -181,6 +288,27 @@ class WorkflowStatus:
needs_redeployment: bool = False
```
### RateLimitInfo
```python
@dataclass
class RateLimitInfo:
limit: int
remaining: int
reset: int
retry_after: Optional[int] = None
```
### UsageLimits
```python
@dataclass
class UsageLimits:
success: bool
rate_limit: Dict[str, Any]
usage: Dict[str, Any]
```
### SimStudioError
```python
@@ -191,6 +319,13 @@ class SimStudioError(Exception):
self.status = status
```
**Häufige Fehlercodes:**
- `UNAUTHORIZED`: Ungültiger API-Schlüssel
- `TIMEOUT`: Zeitüberschreitung bei der Anfrage
- `RATE_LIMIT_EXCEEDED`: Ratengrenze überschritten
- `USAGE_LIMIT_EXCEEDED`: Nutzungsgrenze überschritten
- `EXECUTION_ERROR`: Workflow-Ausführung fehlgeschlagen
## Beispiele
### Grundlegende Workflow-Ausführung
@@ -214,7 +349,7 @@ class SimStudioError(Exception):
import os
from simstudio import SimStudioClient
client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def run_workflow():
try:
@@ -252,7 +387,7 @@ Behandeln Sie verschiedene Fehlertypen, die während der Workflow-Ausführung au
from simstudio import SimStudioClient, SimStudioError
import os
client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def execute_with_error_handling():
try:
@@ -279,16 +414,7 @@ def execute_with_error_handling():
Verwenden Sie den Client als Kontextmanager, um die Ressourcenbereinigung automatisch zu handhaben:
```python
from simstudio import SimStudioClient
import os
# Using context manager to automatically close the session
with SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY")) as client:
result = client.execute_workflow("workflow-id")
print("Result:", result)
# Session is automatically closed here
```
```python
from simstudio import SimStudioClient
import os
# Using context manager to automatically close the session
with SimStudioClient(api_key=os.getenv("SIM_API_KEY")) as client:
result = client.execute_workflow("workflow-id")
print("Result:", result)
# Session is automatically closed here
```
### Batch-Workflow-Ausführung
@@ -298,7 +424,7 @@ Führen Sie mehrere Workflows effizient aus:
from simstudio import SimStudioClient
import os
client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def execute_workflows_batch(workflow_data_pairs):
"""Execute multiple workflows with different input data."""
@@ -339,9 +465,233 @@ for result in results:
print(f"Workflow {result['workflow_id']}: {'Success' if result['success'] else 'Failed'}")
```
### Asynchrone Workflow-Ausführung
Führen Sie Workflows asynchron für lang laufende Aufgaben aus:
```python
import os
import time
from simstudio import SimStudioClient
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def execute_async():
try:
# Start async execution
result = client.execute_workflow(
"workflow-id",
input_data={"data": "large dataset"},
async_execution=True # Execute asynchronously
)
# Check if result is an async execution
if hasattr(result, 'task_id'):
print(f"Task ID: {result.task_id}")
print(f"Status endpoint: {result.links['status']}")
# Poll for completion
status = client.get_job_status(result.task_id)
while status["status"] in ["queued", "processing"]:
print(f"Current status: {status['status']}")
time.sleep(2) # Wait 2 seconds
status = client.get_job_status(result.task_id)
if status["status"] == "completed":
print("Workflow completed!")
print(f"Output: {status['output']}")
print(f"Duration: {status['metadata']['duration']}")
else:
print(f"Workflow failed: {status['error']}")
except Exception as error:
print(f"Error: {error}")
execute_async()
```
### Rate-Limiting und Wiederholungsversuche
Behandle Rate-Limits automatisch mit exponentiellem Backoff:
```python
import os
from simstudio import SimStudioClient, SimStudioError
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def execute_with_retry_handling():
try:
# Automatically retries on rate limit
result = client.execute_with_retry(
"workflow-id",
input_data={"message": "Process this"},
max_retries=5,
initial_delay=1.0,
max_delay=60.0,
backoff_multiplier=2.0
)
print(f"Success: {result}")
except SimStudioError as error:
if error.code == "RATE_LIMIT_EXCEEDED":
print("Rate limit exceeded after all retries")
# Check rate limit info
rate_limit_info = client.get_rate_limit_info()
if rate_limit_info:
from datetime import datetime
reset_time = datetime.fromtimestamp(rate_limit_info.reset)
print(f"Rate limit resets at: {reset_time}")
execute_with_retry_handling()
```
### Nutzungsüberwachung
Überwache deine Kontonutzung und -limits:
```python
import os
from simstudio import SimStudioClient
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def check_usage():
try:
limits = client.get_usage_limits()
print("=== Rate Limits ===")
print("Sync requests:")
print(f" Limit: {limits.rate_limit['sync']['limit']}")
print(f" Remaining: {limits.rate_limit['sync']['remaining']}")
print(f" Resets at: {limits.rate_limit['sync']['resetAt']}")
print(f" Is limited: {limits.rate_limit['sync']['isLimited']}")
print("\nAsync requests:")
print(f" Limit: {limits.rate_limit['async']['limit']}")
print(f" Remaining: {limits.rate_limit['async']['remaining']}")
print(f" Resets at: {limits.rate_limit['async']['resetAt']}")
print(f" Is limited: {limits.rate_limit['async']['isLimited']}")
print("\n=== Usage ===")
print(f"Current period cost: ${limits.usage['currentPeriodCost']:.2f}")
print(f"Limit: ${limits.usage['limit']:.2f}")
print(f"Plan: {limits.usage['plan']}")
percent_used = (limits.usage['currentPeriodCost'] / limits.usage['limit']) * 100
print(f"Usage: {percent_used:.1f}%")
if percent_used > 80:
print("⚠️ Warning: You are approaching your usage limit!")
except Exception as error:
print(f"Error checking usage: {error}")
check_usage()
```
### Streaming-Workflow-Ausführung
Führe Workflows mit Echtzeit-Streaming-Antworten aus:
```python
from simstudio import SimStudioClient
import os
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def execute_with_streaming():
"""Execute workflow with streaming enabled."""
try:
# Enable streaming for specific block outputs
result = client.execute_workflow(
"workflow-id",
input_data={"message": "Count to five"},
stream=True,
selected_outputs=["agent1.content"] # Use blockName.attribute format
)
print("Workflow result:", result)
except Exception as error:
print("Error:", error)
execute_with_streaming()
```
Die Streaming-Antwort folgt dem Server-Sent Events (SSE) Format:
```
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":"One"}
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", two"}
data: {"event":"done","success":true,"output":{},"metadata":{"duration":610}}
data: [DONE]
```
**Flask-Streaming-Beispiel:**
```python
from flask import Flask, Response, stream_with_context
import requests
import json
import os
app = Flask(__name__)
@app.route('/stream-workflow')
def stream_workflow():
"""Stream workflow execution to the client."""
def generate():
response = requests.post(
'https://sim.ai/api/workflows/WORKFLOW_ID/execute',
headers={
'Content-Type': 'application/json',
'X-API-Key': os.getenv('SIM_API_KEY')
},
json={
'message': 'Generate a story',
'stream': True,
'selectedOutputs': ['agent1.content']
},
stream=True
)
for line in response.iter_lines():
if line:
decoded_line = line.decode('utf-8')
if decoded_line.startswith('data: '):
data = decoded_line[6:] # Remove 'data: ' prefix
if data == '[DONE]':
break
try:
parsed = json.loads(data)
if 'chunk' in parsed:
yield f"data: {json.dumps(parsed)}\n\n"
elif parsed.get('event') == 'done':
yield f"data: {json.dumps(parsed)}\n\n"
print("Execution complete:", parsed.get('metadata'))
except json.JSONDecodeError:
pass
return Response(
stream_with_context(generate()),
mimetype='text/event-stream'
)
if __name__ == '__main__':
app.run(debug=True)
```
### Umgebungskonfiguration
Konfigurieren Sie den Client mit Umgebungsvariablen:
Konfiguriere den Client mit Umgebungsvariablen:
<Tabs items={['Development', 'Production']}>
<Tab value="Development">
@@ -352,8 +702,8 @@ Konfigurieren Sie den Client mit Umgebungsvariablen:
# Development configuration
client = SimStudioClient(
api_key=os.getenv("SIMSTUDIO_API_KEY"),
base_url=os.getenv("SIMSTUDIO_BASE_URL", "https://sim.ai")
api_key=os.getenv("SIM_API_KEY")
base_url=os.getenv("SIM_BASE_URL", "https://sim.ai")
)
```
@@ -365,13 +715,13 @@ Konfigurieren Sie den Client mit Umgebungsvariablen:
from simstudio import SimStudioClient
# Production configuration with error handling
api_key = os.getenv("SIMSTUDIO_API_KEY")
api_key = os.getenv("SIM_API_KEY")
if not api_key:
raise ValueError("SIMSTUDIO_API_KEY environment variable is required")
raise ValueError("SIM_API_KEY environment variable is required")
client = SimStudioClient(
api_key=api_key,
base_url=os.getenv("SIMSTUDIO_BASE_URL", "https://sim.ai")
base_url=os.getenv("SIM_BASE_URL", "https://sim.ai")
)
```
@@ -382,19 +732,19 @@ Konfigurieren Sie den Client mit Umgebungsvariablen:
<Steps>
<Step title="Bei Sim anmelden">
Navigieren Sie zu [Sim](https://sim.ai) und melden Sie sich bei Ihrem Konto an.
Navigiere zu [Sim](https://sim.ai) und melde dich bei deinem Konto an.
</Step>
<Step title="Ihren Workflow öffnen">
Navigieren Sie zu dem Workflow, den Sie programmatisch ausführen möchten.
<Step title="Öffne deinen Workflow">
Navigiere zu dem Workflow, den du programmatisch ausführen möchtest.
</Step>
<Step title="Ihren Workflow bereitstellen">
Klicken Sie auf "Deploy", um Ihren Workflow bereitzustellen, falls dies noch nicht geschehen ist.
<Step title="Deploye deinen Workflow">
Klicke auf "Deploy", um deinen Workflow zu deployen, falls dies noch nicht geschehen ist.
</Step>
<Step title="API-Schlüssel erstellen oder auswählen">
Wählen Sie während des Bereitstellungsprozesses einen API-Schlüssel aus oder erstellen Sie einen neuen.
<Step title="Erstelle oder wähle einen API-Schlüssel">
Wähle während des Deployment-Prozesses einen API-Schlüssel aus oder erstelle einen neuen.
</Step>
<Step title="API-Schlüssel kopieren">
Kopieren Sie den API-Schlüssel zur Verwendung in Ihrer Python-Anwendung.
<Step title="Kopiere den API-Schlüssel">
Kopiere den API-Schlüssel zur Verwendung in deiner Python-Anwendung.
</Step>
</Steps>

View File

@@ -7,10 +7,10 @@ import { Card, Cards } from 'fumadocs-ui/components/card'
import { Step, Steps } from 'fumadocs-ui/components/steps'
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
Das offizielle TypeScript/JavaScript SDK für Sim bietet vollständige Typsicherheit und unterstützt sowohl Node.js- als auch Browser-Umgebungen, sodass Sie Workflows programmatisch aus Ihren Node.js-Anwendungen, Webanwendungen und anderen JavaScript-Umgebungen ausführen können. Alle Workflow-Ausführungen sind derzeit synchron.
Das offizielle TypeScript/JavaScript SDK für Sim bietet vollständige Typsicherheit und unterstützt sowohl Node.js- als auch Browser-Umgebungen, sodass Sie Workflows programmatisch aus Ihren Node.js-Anwendungen, Webanwendungen und anderen JavaScript-Umgebungen ausführen können.
<Callout type="info">
Das TypeScript SDK bietet vollständige Typsicherheit und unterstützt sowohl Node.js- als auch Browser-Umgebungen. Alle Workflow-Ausführungen sind derzeit synchron.
Das TypeScript SDK bietet vollständige Typsicherheit, Unterstützung für asynchrone Ausführung, automatische Ratenbegrenzung mit exponentiellem Backoff und Nutzungsverfolgung.
</Callout>
## Installation
@@ -95,8 +95,13 @@ const result = await client.executeWorkflow('workflow-id', {
- `options` (ExecutionOptions, optional):
- `input` (any): Eingabedaten, die an den Workflow übergeben werden
- `timeout` (number): Timeout in Millisekunden (Standard: 30000)
- `stream` (boolean): Streaming-Antworten aktivieren (Standard: false)
- `selectedOutputs` (string[]): Block-Ausgaben, die im `blockName.attribute`-Format gestreamt werden sollen (z.B. `["agent1.content"]`)
- `async` (boolean): Asynchron ausführen (Standard: false)
**Rückgabewert:** `Promise<WorkflowExecutionResult>`
**Rückgabe:** `Promise<WorkflowExecutionResult | AsyncExecutionResult>`
Wenn `async: true`, wird sofort mit einer Task-ID zum Abfragen zurückgegeben. Andernfalls wird auf den Abschluss gewartet.
##### getWorkflowStatus()
@@ -110,7 +115,7 @@ console.log('Is deployed:', status.isDeployed);
**Parameter:**
- `workflowId` (string): Die ID des Workflows
**Rückgabewert:** `Promise<WorkflowStatus>`
**Rückgabe:** `Promise<WorkflowStatus>`
##### validateWorkflow()
@@ -126,34 +131,123 @@ if (isReady) {
**Parameter:**
- `workflowId` (string): Die ID des Workflows
**Rückgabewert:** `Promise<boolean>`
**Rückgabe:** `Promise<boolean>`
##### executeWorkflowSync()
##### getJobStatus()
<Callout type="info">
Derzeit ist diese Methode identisch mit `executeWorkflow()`, da alle Ausführungen synchron sind. Diese Methode wird für zukünftige Kompatibilität bereitgestellt, wenn asynchrone Ausführung hinzugefügt wird.
</Callout>
Einen Workflow ausführen (derzeit synchron, identisch mit `executeWorkflow()`).
Den Status einer asynchronen Job-Ausführung abrufen.
```typescript
const result = await client.executeWorkflowSync('workflow-id', {
input: { data: 'some input' },
timeout: 60000
const status = await client.getJobStatus('task-id-from-async-execution');
console.log('Status:', status.status); // 'queued', 'processing', 'completed', 'failed'
if (status.status === 'completed') {
console.log('Output:', status.output);
}
```
**Parameter:**
- `taskId` (string): Die Task-ID, die von der asynchronen Ausführung zurückgegeben wurde
**Rückgabe:** `Promise<JobStatus>`
**Antwortfelder:**
- `success` (boolean): Ob die Anfrage erfolgreich war
- `taskId` (string): Die Task-ID
- `status` (string): Einer der Werte `'queued'`, `'processing'`, `'completed'`, `'failed'`, `'cancelled'`
- `metadata` (object): Enthält `startedAt`, `completedAt` und `duration`
- `output` (any, optional): Die Workflow-Ausgabe (wenn abgeschlossen)
- `error` (any, optional): Fehlerdetails (wenn fehlgeschlagen)
- `estimatedDuration` (number, optional): Geschätzte Dauer in Millisekunden (wenn in Bearbeitung/in der Warteschlange)
##### executeWithRetry()
Führt einen Workflow mit automatischer Wiederholung bei Ratenlimitfehlern unter Verwendung von exponentiellem Backoff aus.
```typescript
const result = await client.executeWithRetry('workflow-id', {
input: { message: 'Hello' },
timeout: 30000
}, {
maxRetries: 3, // Maximum number of retries
initialDelay: 1000, // Initial delay in ms (1 second)
maxDelay: 30000, // Maximum delay in ms (30 seconds)
backoffMultiplier: 2 // Exponential backoff multiplier
});
```
**Parameter:**
- `workflowId` (string): Die ID des auszuführenden Workflows
- `options` (ExecutionOptions, optional):
- `input` (any): Eingabedaten, die an den Workflow übergeben werden
- `timeout` (number): Timeout für die initiale Anfrage in Millisekunden
- `options` (ExecutionOptions, optional): Gleich wie `executeWorkflow()`
- `retryOptions` (RetryOptions, optional):
- `maxRetries` (number): Maximale Anzahl von Wiederholungen (Standard: 3)
- `initialDelay` (number): Anfängliche Verzögerung in ms (Standard: 1000)
- `maxDelay` (number): Maximale Verzögerung in ms (Standard: 30000)
- `backoffMultiplier` (number): Backoff-Multiplikator (Standard: 2)
**Rückgabewert:** `Promise<WorkflowExecutionResult>`
**Rückgabewert:** `Promise<WorkflowExecutionResult | AsyncExecutionResult>`
Die Wiederholungslogik verwendet exponentielles Backoff (1s → 2s → 4s → 8s...) mit ±25% Jitter, um den Thundering-Herd-Effekt zu vermeiden. Wenn die API einen `retry-after`-Header bereitstellt, wird dieser stattdessen verwendet.
##### getRateLimitInfo()
Ruft die aktuellen Ratenlimit-Informationen aus der letzten API-Antwort ab.
```typescript
const rateLimitInfo = client.getRateLimitInfo();
if (rateLimitInfo) {
console.log('Limit:', rateLimitInfo.limit);
console.log('Remaining:', rateLimitInfo.remaining);
console.log('Reset:', new Date(rateLimitInfo.reset * 1000));
}
```
**Rückgabewert:** `RateLimitInfo | null`
##### getUsageLimits()
Ruft aktuelle Nutzungslimits und Kontingentinformationen für Ihr Konto ab.
```typescript
const limits = await client.getUsageLimits();
console.log('Sync requests remaining:', limits.rateLimit.sync.remaining);
console.log('Async requests remaining:', limits.rateLimit.async.remaining);
console.log('Current period cost:', limits.usage.currentPeriodCost);
console.log('Plan:', limits.usage.plan);
```
**Rückgabewert:** `Promise<UsageLimits>`
**Antwortstruktur:**
```typescript
{
success: boolean
rateLimit: {
sync: {
isLimited: boolean
limit: number
remaining: number
resetAt: string
}
async: {
isLimited: boolean
limit: number
remaining: number
resetAt: string
}
authType: string // 'api' or 'manual'
}
usage: {
currentPeriodCost: number
limit: number
plan: string // e.g., 'free', 'pro'
}
}
```
##### setApiKey()
Den API-Schlüssel aktualisieren.
Aktualisiert den API-Schlüssel.
```typescript
client.setApiKey('new-api-key');
@@ -161,7 +255,7 @@ client.setApiKey('new-api-key');
##### setBaseUrl()
Die Basis-URL aktualisieren.
Aktualisiert die Basis-URL.
```typescript
client.setBaseUrl('https://my-custom-domain.com');
@@ -187,6 +281,20 @@ interface WorkflowExecutionResult {
}
```
### AsyncExecutionResult
```typescript
interface AsyncExecutionResult {
success: boolean;
taskId: string;
status: 'queued';
createdAt: string;
links: {
status: string; // e.g., "/api/jobs/{taskId}"
};
}
```
### WorkflowStatus
```typescript
@@ -198,6 +306,45 @@ interface WorkflowStatus {
}
```
### RateLimitInfo
```typescript
interface RateLimitInfo {
limit: number;
remaining: number;
reset: number;
retryAfter?: number;
}
```
### UsageLimits
```typescript
interface UsageLimits {
success: boolean;
rateLimit: {
sync: {
isLimited: boolean;
limit: number;
remaining: number;
resetAt: string;
};
async: {
isLimited: boolean;
limit: number;
remaining: number;
resetAt: string;
};
authType: string;
};
usage: {
currentPeriodCost: number;
limit: number;
plan: string;
};
}
```
### SimStudioError
```typescript
@@ -207,6 +354,13 @@ class SimStudioError extends Error {
}
```
**Häufige Fehlercodes:**
- `UNAUTHORIZED`: Ungültiger API-Schlüssel
- `TIMEOUT`: Zeitüberschreitung der Anfrage
- `RATE_LIMIT_EXCEEDED`: Ratengrenze überschritten
- `USAGE_LIMIT_EXCEEDED`: Nutzungsgrenze überschritten
- `EXECUTION_ERROR`: Workflow-Ausführung fehlgeschlagen
## Beispiele
### Grundlegende Workflow-Ausführung
@@ -230,7 +384,7 @@ class SimStudioError extends Error {
import { SimStudioClient } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
apiKey: process.env.SIMSTUDIO_API_KEY!
apiKey: process.env.SIM_API_KEY!
});
async function runWorkflow() {
@@ -271,7 +425,7 @@ Behandeln Sie verschiedene Fehlertypen, die während der Workflow-Ausführung au
import { SimStudioClient, SimStudioError } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
apiKey: process.env.SIMSTUDIO_API_KEY!
apiKey: process.env.SIM_API_KEY!
});
async function executeWithErrorHandling() {
@@ -315,14 +469,14 @@ Konfigurieren Sie den Client mit Umgebungsvariablen:
import { SimStudioClient } from 'simstudio-ts-sdk';
// Development configuration
const apiKey = process.env.SIMSTUDIO_API_KEY;
const apiKey = process.env.SIM_API_KEY;
if (!apiKey) {
throw new Error('SIMSTUDIO_API_KEY environment variable is required');
throw new Error('SIM_API_KEY environment variable is required');
}
const client = new SimStudioClient({
apiKey,
baseUrl: process.env.SIMSTUDIO_BASE_URL // optional
baseUrl: process.env.SIM_BASE_URL // optional
});
```
@@ -333,14 +487,14 @@ Konfigurieren Sie den Client mit Umgebungsvariablen:
import { SimStudioClient } from 'simstudio-ts-sdk';
// Production configuration with validation
const apiKey = process.env.SIMSTUDIO_API_KEY;
const apiKey = process.env.SIM_API_KEY;
if (!apiKey) {
throw new Error('SIMSTUDIO_API_KEY environment variable is required');
throw new Error('SIM_API_KEY environment variable is required');
}
const client = new SimStudioClient({
apiKey,
baseUrl: process.env.SIMSTUDIO_BASE_URL || 'https://sim.ai'
baseUrl: process.env.SIM_BASE_URL || 'https://sim.ai'
});
```
@@ -357,7 +511,7 @@ import { SimStudioClient } from 'simstudio-ts-sdk';
const app = express();
const client = new SimStudioClient({
apiKey: process.env.SIMSTUDIO_API_KEY!
apiKey: process.env.SIM_API_KEY!
});
app.use(express.json());
@@ -399,7 +553,7 @@ import { NextApiRequest, NextApiResponse } from 'next';
import { SimStudioClient } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
apiKey: process.env.SIMSTUDIO_API_KEY!
apiKey: process.env.SIM_API_KEY!
});
export default async function handler(
@@ -467,16 +621,16 @@ document.getElementById('executeBtn')?.addEventListener('click', executeClientSi
Bei der Verwendung des SDK im Browser sollten Sie darauf achten, keine sensiblen API-Schlüssel offenzulegen. Erwägen Sie die Verwendung eines Backend-Proxys oder öffentlicher API-Schlüssel mit eingeschränkten Berechtigungen.
</Callout>
### React Hook Beispiel
### React Hook-Beispiel
Erstellen Sie einen benutzerdefinierten React Hook für die Workflow-Ausführung:
Erstellen eines benutzerdefinierten React-Hooks für die Workflow-Ausführung:
```typescript
import { useState, useCallback } from 'react';
import { SimStudioClient, WorkflowExecutionResult } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
apiKey: process.env.NEXT_PUBLIC_SIMSTUDIO_API_KEY!
apiKey: process.env.SIM_API_KEY!
});
interface UseWorkflowResult {
@@ -532,7 +686,7 @@ function WorkflowComponent() {
<button onClick={handleExecute} disabled={loading}>
{loading ? 'Executing...' : 'Execute Workflow'}
</button>
{error && <div>Error: {error.message}</div>}
{result && (
<div>
@@ -545,38 +699,267 @@ function WorkflowComponent() {
}
```
## Ihren API-Schlüssel erhalten
### Asynchrone Workflow-Ausführung
Führen Sie Workflows asynchron für lang laufende Aufgaben aus:
```typescript
import { SimStudioClient, AsyncExecutionResult } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
apiKey: process.env.SIM_API_KEY!
});
async function executeAsync() {
try {
// Start async execution
const result = await client.executeWorkflow('workflow-id', {
input: { data: 'large dataset' },
async: true // Execute asynchronously
});
// Check if result is an async execution
if ('taskId' in result) {
console.log('Task ID:', result.taskId);
console.log('Status endpoint:', result.links.status);
// Poll for completion
let status = await client.getJobStatus(result.taskId);
while (status.status === 'queued' || status.status === 'processing') {
console.log('Current status:', status.status);
await new Promise(resolve => setTimeout(resolve, 2000)); // Wait 2 seconds
status = await client.getJobStatus(result.taskId);
}
if (status.status === 'completed') {
console.log('Workflow completed!');
console.log('Output:', status.output);
console.log('Duration:', status.metadata.duration);
} else {
console.error('Workflow failed:', status.error);
}
}
} catch (error) {
console.error('Error:', error);
}
}
executeAsync();
```
### Rate-Limiting und Wiederholungsversuche
Automatische Behandlung von Rate-Limits mit exponentiellem Backoff:
```typescript
import { SimStudioClient, SimStudioError } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
apiKey: process.env.SIM_API_KEY!
});
async function executeWithRetryHandling() {
try {
// Automatically retries on rate limit
const result = await client.executeWithRetry('workflow-id', {
input: { message: 'Process this' }
}, {
maxRetries: 5,
initialDelay: 1000,
maxDelay: 60000,
backoffMultiplier: 2
});
console.log('Success:', result);
} catch (error) {
if (error instanceof SimStudioError && error.code === 'RATE_LIMIT_EXCEEDED') {
console.error('Rate limit exceeded after all retries');
// Check rate limit info
const rateLimitInfo = client.getRateLimitInfo();
if (rateLimitInfo) {
console.log('Rate limit resets at:', new Date(rateLimitInfo.reset * 1000));
}
}
}
}
```
### Nutzungsüberwachung
Überwachen Sie Ihre Kontonutzung und -limits:
```typescript
import { SimStudioClient } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
apiKey: process.env.SIM_API_KEY!
});
async function checkUsage() {
try {
const limits = await client.getUsageLimits();
console.log('=== Rate Limits ===');
console.log('Sync requests:');
console.log(' Limit:', limits.rateLimit.sync.limit);
console.log(' Remaining:', limits.rateLimit.sync.remaining);
console.log(' Resets at:', limits.rateLimit.sync.resetAt);
console.log(' Is limited:', limits.rateLimit.sync.isLimited);
console.log('\nAsync requests:');
console.log(' Limit:', limits.rateLimit.async.limit);
console.log(' Remaining:', limits.rateLimit.async.remaining);
console.log(' Resets at:', limits.rateLimit.async.resetAt);
console.log(' Is limited:', limits.rateLimit.async.isLimited);
console.log('\n=== Usage ===');
console.log('Current period cost:', limits.usage.currentPeriodCost);
console.log('Limit:', limits.usage.limit);
console.log('Plan:', limits.usage.plan);
} catch (error) {
console.error('Error checking usage:', error);
}
}
checkUsage();
```
### Streaming Workflow Execution
Execute workflows with real-time streaming responses:
```typescript
import { SimStudioClient } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
apiKey: process.env.SIM_API_KEY!
});
async function executeWithStreaming() {
try {
// Streaming für bestimmte Block-Ausgaben aktivieren
const result = await client.executeWorkflow('workflow-id', {
input: { message: 'Count to five' },
stream: true,
selectedOutputs: ['agent1.content'] // Format blockName.attribute verwenden
});
console.log('Workflow-Ergebnis:', result);
} catch (error) {
console.error('Fehler:', error);
}
}
```
The streaming response follows the Server-Sent Events (SSE) format:
```
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":"One"}
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", zwei"}
data: {"event":"done","success":true,"output":{},"metadata":{"duration":610}}
data: [DONE]
```
**React Streaming Example:**
```typescript
import { useState, useEffect } from 'react';
function StreamingWorkflow() {
const [output, setOutput] = useState('');
const [loading, setLoading] = useState(false);
const executeStreaming = async () => {
setLoading(true);
setOutput('');
// WICHTIG: Führen Sie diesen API-Aufruf von Ihrem Backend-Server aus, nicht vom Browser
// Setzen Sie niemals Ihren API-Schlüssel im Client-seitigen Code frei
const response = await fetch('https://sim.ai/api/workflows/WORKFLOW_ID/execute', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'X-API-Key': process.env.SIM_API_KEY! // Nur serverseitige Umgebungsvariable
},
body: JSON.stringify({
message: 'Generate a story',
stream: true,
selectedOutputs: ['agent1.content']
})
});
const reader = response.body?.getReader();
const decoder = new TextDecoder();
while (reader) {
const { done, value } = await reader.read();
if (done) break;
const chunk = decoder.decode(value);
const lines = chunk.split('\n\n');
for (const line of lines) {
if (line.startsWith('data: ')) {
const data = line.slice(6);
if (data === '[DONE]') {
setLoading(false);
break;
}
try {
const parsed = JSON.parse(data);
if (parsed.chunk) {
setOutput(prev => prev + parsed.chunk);
} else if (parsed.event === 'done') {
console.log('Ausführung abgeschlossen:', parsed.metadata);
}
} catch (e) {
// Ungültiges JSON überspringen
}
}
}
}
};
return (
<div>
<button onClick={executeStreaming} disabled={loading}>
{loading ? 'Generiere...' : 'Streaming starten'}
</button>
<div style={{ whiteSpace: 'pre-wrap' }}>{output}</div>
</div>
);
}
```
## Getting Your API Key
<Steps>
<Step title="Bei Sim anmelden">
Navigieren Sie zu [Sim](https://sim.ai) und melden Sie sich bei Ihrem Konto an.
<Step title="Log in to Sim">
Navigate to [Sim](https://sim.ai) and log in to your account.
</Step>
<Step title="Öffnen Sie Ihren Workflow">
Navigieren Sie zu dem Workflow, den Sie programmatisch ausführen möchten.
<Step title="Open your workflow">
Navigate to the workflow you want to execute programmatically.
</Step>
<Step title="Deployen Sie Ihren Workflow">
Klicken Sie auf "Deploy", um Ihren Workflow zu deployen, falls dies noch nicht geschehen ist.
<Step title="Deploy your workflow">
Click on "Deploy" to deploy your workflow if it hasn't been deployed yet.
</Step>
<Step title="Erstellen oder wählen Sie einen API-Schlüssel">
Wählen Sie während des Deployment-Prozesses einen API-Schlüssel aus oder erstellen Sie einen neuen.
<Step title="Create or select an API key">
During the deployment process, select or create an API key.
</Step>
<Step title="Kopieren Sie den API-Schlüssel">
Kopieren Sie den API-Schlüssel zur Verwendung in Ihrer TypeScript/JavaScript-Anwendung.
<Step title="Copy the API key">
Copy the API key to use in your TypeScript/JavaScript application.
</Step>
</Steps>
<Callout type="warning">
Halten Sie Ihren API-Schlüssel sicher und committen Sie ihn niemals in die Versionskontrolle. Verwenden Sie Umgebungsvariablen oder sicheres Konfigurationsmanagement.
Keep your API key secure and never commit it to version control. Use environment variables or secure configuration management.
</Callout>
## Anforderungen
## Requirements
- Node.js 16+
- TypeScript 5.0+ (für TypeScript-Projekte)
- TypeScript 5.0+ (for TypeScript projects)
## TypeScript-Unterstützung
## TypeScript Support
Das SDK ist in TypeScript geschrieben und bietet vollständige Typsicherheit:
The SDK is written in TypeScript and provides full type safety:
```typescript
import {
@@ -586,22 +969,22 @@ import {
SimStudioError
} from 'simstudio-ts-sdk';
// Type-safe client initialization
// Typsichere Client-Initialisierung
const client: SimStudioClient = new SimStudioClient({
apiKey: process.env.SIMSTUDIO_API_KEY!
apiKey: process.env.SIM_API_KEY!
});
// Type-safe workflow execution
// Typsichere Workflow-Ausführung
const result: WorkflowExecutionResult = await client.executeWorkflow('workflow-id', {
input: {
message: 'Hello, TypeScript!'
}
});
// Type-safe status checking
// Typsichere Statusprüfung
const status: WorkflowStatus = await client.getWorkflowStatus('workflow-id');
```
## Lizenz
## License
Apache-2.0

View File

@@ -38,14 +38,92 @@ curl -X POST \
Erfolgreiche Antworten geben das serialisierte Ausführungsergebnis vom Executor zurück. Fehler zeigen Validierungs-, Authentifizierungs- oder Workflow-Fehler an.
## Ausgabe-Referenz
## Streaming-Antworten
Aktivieren Sie Echtzeit-Streaming, um Workflow-Ausgaben zu erhalten, während sie Zeichen für Zeichen generiert werden. Dies ist nützlich, um KI-Antworten progressiv für Benutzer anzuzeigen.
### Anfrageparameter
Fügen Sie diese Parameter hinzu, um Streaming zu aktivieren:
- `stream` - Auf `true` setzen, um Server-Sent Events (SSE) Streaming zu aktivieren
- `selectedOutputs` - Array von Block-Ausgaben zum Streamen (z.B. `["agent1.content"]`)
### Block-Ausgabeformat
Verwenden Sie das `blockName.attribute`-Format, um anzugeben, welche Block-Ausgaben gestreamt werden sollen:
- Format: `"blockName.attribute"` (z.B. wenn Sie den Inhalt des Agent-1-Blocks streamen möchten, würden Sie `"agent1.content"` verwenden)
- Blocknamen sind nicht case-sensitive und Leerzeichen werden ignoriert
### Beispielanfrage
```bash
curl -X POST \
https://sim.ai/api/workflows/WORKFLOW_ID/execute \
-H 'Content-Type: application/json' \
-H 'X-API-Key: YOUR_KEY' \
-d '{
"message": "Count to five",
"stream": true,
"selectedOutputs": ["agent1.content"]
}'
```
### Antwortformat
Streaming-Antworten verwenden das Server-Sent Events (SSE) Format:
```
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":"One"}
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", two"}
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", three"}
data: {"event":"done","success":true,"output":{},"metadata":{"duration":610}}
data: [DONE]
```
Jedes Ereignis enthält:
- **Streaming-Chunks**: `{"blockId": "...", "chunk": "text"}` - Echtzeit-Text, während er generiert wird
- **Abschlussereignis**: `{"event": "done", ...}` - Ausführungsmetadaten und vollständige Ergebnisse
- **Terminator**: `[DONE]` - Signalisiert das Ende des Streams
### Streaming mehrerer Blöcke
Wenn `selectedOutputs` mehrere Blöcke enthält, zeigt jeder Chunk an, welcher Block ihn erzeugt hat:
```bash
curl -X POST \
https://sim.ai/api/workflows/WORKFLOW_ID/execute \
-H 'Content-Type: application/json' \
-H 'X-API-Key: YOUR_KEY' \
-d '{
"message": "Process this request",
"stream": true,
"selectedOutputs": ["agent1.content", "agent2.content"]
}'
```
Das Feld `blockId` in jedem Chunk ermöglicht es Ihnen, die Ausgabe zum richtigen UI-Element zu leiten:
```
data: {"blockId":"agent1-uuid","chunk":"Processing..."}
data: {"blockId":"agent2-uuid","chunk":"Analyzing..."}
data: {"blockId":"agent1-uuid","chunk":" complete"}
```
## Ausgabereferenz
| Referenz | Beschreibung |
|-----------|-------------|
| `<api.field>` | Im Eingabeformat definiertes Feld |
| `<api.input>` | Gesamter strukturierter Anfragekörper |
Wenn kein Eingabeformat definiert ist, stellt der Executor das rohe JSON nur unter `<api.input>` bereit.
Wenn kein Eingabeformat definiert ist, stellt der Executor das rohe JSON nur unter `<api.input>` zur Verfügung.
<Callout type="warning">
Ein Workflow kann nur einen API-Trigger enthalten. Veröffentlichen Sie nach Änderungen eine neue Bereitstellung, damit der Endpunkt aktuell bleibt.

View File

@@ -10,7 +10,7 @@ import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
The official Python SDK for Sim allows you to execute workflows programmatically from your Python applications.
<Callout type="info">
The Python SDK supports Python 3.8+ and provides synchronous workflow execution. All workflow executions are currently synchronous.
The Python SDK supports Python 3.8+ with async execution support, automatic rate limiting with exponential backoff, and usage tracking.
</Callout>
## Installation
@@ -74,8 +74,13 @@ result = client.execute_workflow(
- `workflow_id` (str): The ID of the workflow to execute
- `input_data` (dict, optional): Input data to pass to the workflow
- `timeout` (float, optional): Timeout in seconds (default: 30.0)
- `stream` (bool, optional): Enable streaming responses (default: False)
- `selected_outputs` (list[str], optional): Block outputs to stream in `blockName.attribute` format (e.g., `["agent1.content"]`)
- `async_execution` (bool, optional): Execute asynchronously (default: False)
**Returns:** `WorkflowExecutionResult`
**Returns:** `WorkflowExecutionResult | AsyncExecutionResult`
When `async_execution=True`, returns immediately with a task ID for polling. Otherwise, waits for completion.
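For illustration, a minimal sketch of telling the two return types apart (assuming a `client` created as in the examples below):
```python
result = client.execute_workflow("workflow-id", async_execution=True)

# AsyncExecutionResult exposes task_id; a synchronous run returns WorkflowExecutionResult
if hasattr(result, "task_id"):
    status = client.get_job_status(result.task_id)  # poll until completed
    print("Current status:", status["status"])
else:
    print("Completed synchronously:", result)
```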
##### get_workflow_status()
@@ -107,28 +112,117 @@ if is_ready:
**Returns:** `bool`
##### execute_workflow_sync()
##### get_job_status()
<Callout type="info">
Currently, this method is identical to `execute_workflow()` since all executions are synchronous. This method is provided for future compatibility when asynchronous execution is added.
</Callout>
Execute a workflow (currently synchronous, same as `execute_workflow()`).
Get the status of an async job execution.
```python
result = client.execute_workflow_sync(
status = client.get_job_status("task-id-from-async-execution")
print("Status:", status["status"]) # 'queued', 'processing', 'completed', 'failed'
if status["status"] == "completed":
print("Output:", status["output"])
```
**Parameters:**
- `task_id` (str): The task ID returned from async execution
**Returns:** `Dict[str, Any]`
**Response fields:**
- `success` (bool): Whether the request was successful
- `taskId` (str): The task ID
- `status` (str): One of `'queued'`, `'processing'`, `'completed'`, `'failed'`, `'cancelled'`
- `metadata` (dict): Contains `startedAt`, `completedAt`, and `duration`
- `output` (any, optional): The workflow output (when completed)
- `error` (any, optional): Error details (when failed)
- `estimatedDuration` (int, optional): Estimated duration in milliseconds (when processing/queued)
##### execute_with_retry()
Execute a workflow with automatic retry on rate limit errors using exponential backoff.
```python
result = client.execute_with_retry(
"workflow-id",
input_data={"data": "some input"},
timeout=60.0
input_data={"message": "Hello"},
timeout=30.0,
max_retries=3, # Maximum number of retries
initial_delay=1.0, # Initial delay in seconds
max_delay=30.0, # Maximum delay in seconds
backoff_multiplier=2.0 # Exponential backoff multiplier
)
```
**Parameters:**
- `workflow_id` (str): The ID of the workflow to execute
- `input_data` (dict, optional): Input data to pass to the workflow
- `timeout` (float): Timeout for the initial request in seconds
- `timeout` (float, optional): Timeout in seconds
- `stream` (bool, optional): Enable streaming responses
- `selected_outputs` (list, optional): Block outputs to stream
- `async_execution` (bool, optional): Execute asynchronously
- `max_retries` (int, optional): Maximum number of retries (default: 3)
- `initial_delay` (float, optional): Initial delay in seconds (default: 1.0)
- `max_delay` (float, optional): Maximum delay in seconds (default: 30.0)
- `backoff_multiplier` (float, optional): Backoff multiplier (default: 2.0)
**Returns:** `WorkflowExecutionResult`
**Returns:** `WorkflowExecutionResult | AsyncExecutionResult`
The retry logic uses exponential backoff (1s → 2s → 4s → 8s...) with ±25% jitter to prevent thundering herd. If the API provides a `retry-after` header, it will be used instead.
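As an illustration only (not the SDK's internal code), the delay schedule described above could be computed like this:
```python
import random

def backoff_delays(max_retries=3, initial_delay=1.0, max_delay=30.0, backoff_multiplier=2.0):
    """Yield one delay per retry: exponential growth, capped at max_delay, with ±25% jitter."""
    delay = initial_delay
    for _ in range(max_retries):
        jitter = random.uniform(-0.25, 0.25) * delay
        yield min(delay + jitter, max_delay)
        delay = min(delay * backoff_multiplier, max_delay)

# Roughly 1s, 2s, 4s for the defaults, each varied by up to ±25%
print([round(d, 2) for d in backoff_delays()])
```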
##### get_rate_limit_info()
Get the current rate limit information from the last API response.
```python
rate_limit_info = client.get_rate_limit_info()
if rate_limit_info:
print("Limit:", rate_limit_info.limit)
print("Remaining:", rate_limit_info.remaining)
print("Reset:", datetime.fromtimestamp(rate_limit_info.reset))
```
**Returns:** `RateLimitInfo | None`
##### get_usage_limits()
Get current usage limits and quota information for your account.
```python
limits = client.get_usage_limits()
print("Sync requests remaining:", limits.rate_limit["sync"]["remaining"])
print("Async requests remaining:", limits.rate_limit["async"]["remaining"])
print("Current period cost:", limits.usage["currentPeriodCost"])
print("Plan:", limits.usage["plan"])
```
**Returns:** `UsageLimits`
**Response structure:**
```python
{
"success": bool,
"rateLimit": {
"sync": {
"isLimited": bool,
"limit": int,
"remaining": int,
"resetAt": str
},
"async": {
"isLimited": bool,
"limit": int,
"remaining": int,
"resetAt": str
},
"authType": str # 'api' or 'manual'
},
"usage": {
"currentPeriodCost": float,
"limit": float,
"plan": str # e.g., 'free', 'pro'
}
}
```
##### set_api_key()
@@ -170,6 +264,18 @@ class WorkflowExecutionResult:
total_duration: Optional[float] = None
```
### AsyncExecutionResult
```python
@dataclass
class AsyncExecutionResult:
success: bool
task_id: str
status: str # 'queued'
created_at: str
links: Dict[str, str] # e.g., {"status": "/api/jobs/{taskId}"}
```
### WorkflowStatus
```python
@@ -181,6 +287,27 @@ class WorkflowStatus:
needs_redeployment: bool = False
```
### RateLimitInfo
```python
@dataclass
class RateLimitInfo:
limit: int
remaining: int
reset: int
retry_after: Optional[int] = None
```
### UsageLimits
```python
@dataclass
class UsageLimits:
success: bool
rate_limit: Dict[str, Any]
usage: Dict[str, Any]
```
### SimStudioError
```python
@@ -191,6 +318,13 @@ class SimStudioError(Exception):
self.status = status
```
**Common error codes:**
- `UNAUTHORIZED`: Invalid API key
- `TIMEOUT`: Request timed out
- `RATE_LIMIT_EXCEEDED`: Rate limit exceeded
- `USAGE_LIMIT_EXCEEDED`: Usage limit exceeded
- `EXECUTION_ERROR`: Workflow execution failed
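For instance, a minimal sketch of branching on these codes (assuming a `client` created as in the examples below):
```python
from simstudio import SimStudioError

try:
    client.execute_workflow("workflow-id")
except SimStudioError as error:
    if error.code == "RATE_LIMIT_EXCEEDED":
        print("Hit the rate limit; retry later or use execute_with_retry()")
    elif error.code == "USAGE_LIMIT_EXCEEDED":
        print("Usage quota exhausted for the current period")
    else:
        print(f"Workflow error ({error.code}): {error}")
```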
## Examples
### Basic Workflow Execution
@@ -214,7 +348,7 @@ class SimStudioError(Exception):
import os
from simstudio import SimStudioClient
client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def run_workflow():
try:
@@ -252,7 +386,7 @@ Handle different types of errors that may occur during workflow execution:
from simstudio import SimStudioClient, SimStudioError
import os
client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def execute_with_error_handling():
try:
@@ -284,7 +418,7 @@ from simstudio import SimStudioClient
import os
# Using context manager to automatically close the session
with SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY")) as client:
with SimStudioClient(api_key=os.getenv("SIM_API_KEY")) as client:
result = client.execute_workflow("workflow-id")
print("Result:", result)
# Session is automatically closed here
@@ -298,7 +432,7 @@ Execute multiple workflows efficiently:
from simstudio import SimStudioClient
import os
client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def execute_workflows_batch(workflow_data_pairs):
"""Execute multiple workflows with different input data."""
@@ -339,6 +473,230 @@ for result in results:
print(f"Workflow {result['workflow_id']}: {'Success' if result['success'] else 'Failed'}")
```
### Async Workflow Execution
Execute workflows asynchronously for long-running tasks:
```python
import os
import time
from simstudio import SimStudioClient
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def execute_async():
try:
# Start async execution
result = client.execute_workflow(
"workflow-id",
input_data={"data": "large dataset"},
async_execution=True # Execute asynchronously
)
# Check if result is an async execution
if hasattr(result, 'task_id'):
print(f"Task ID: {result.task_id}")
print(f"Status endpoint: {result.links['status']}")
# Poll for completion
status = client.get_job_status(result.task_id)
while status["status"] in ["queued", "processing"]:
print(f"Current status: {status['status']}")
time.sleep(2) # Wait 2 seconds
status = client.get_job_status(result.task_id)
if status["status"] == "completed":
print("Workflow completed!")
print(f"Output: {status['output']}")
print(f"Duration: {status['metadata']['duration']}")
else:
print(f"Workflow failed: {status['error']}")
except Exception as error:
print(f"Error: {error}")
execute_async()
```
### Rate Limiting and Retry
Handle rate limits automatically with exponential backoff:
```python
import os
from simstudio import SimStudioClient, SimStudioError
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def execute_with_retry_handling():
try:
# Automatically retries on rate limit
result = client.execute_with_retry(
"workflow-id",
input_data={"message": "Process this"},
max_retries=5,
initial_delay=1.0,
max_delay=60.0,
backoff_multiplier=2.0
)
print(f"Success: {result}")
except SimStudioError as error:
if error.code == "RATE_LIMIT_EXCEEDED":
print("Rate limit exceeded after all retries")
# Check rate limit info
rate_limit_info = client.get_rate_limit_info()
if rate_limit_info:
from datetime import datetime
reset_time = datetime.fromtimestamp(rate_limit_info.reset)
print(f"Rate limit resets at: {reset_time}")
execute_with_retry_handling()
```
### Usage Monitoring
Monitor your account usage and limits:
```python
import os
from simstudio import SimStudioClient
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def check_usage():
try:
limits = client.get_usage_limits()
print("=== Rate Limits ===")
print("Sync requests:")
print(f" Limit: {limits.rate_limit['sync']['limit']}")
print(f" Remaining: {limits.rate_limit['sync']['remaining']}")
print(f" Resets at: {limits.rate_limit['sync']['resetAt']}")
print(f" Is limited: {limits.rate_limit['sync']['isLimited']}")
print("\nAsync requests:")
print(f" Limit: {limits.rate_limit['async']['limit']}")
print(f" Remaining: {limits.rate_limit['async']['remaining']}")
print(f" Resets at: {limits.rate_limit['async']['resetAt']}")
print(f" Is limited: {limits.rate_limit['async']['isLimited']}")
print("\n=== Usage ===")
print(f"Current period cost: ${limits.usage['currentPeriodCost']:.2f}")
print(f"Limit: ${limits.usage['limit']:.2f}")
print(f"Plan: {limits.usage['plan']}")
percent_used = (limits.usage['currentPeriodCost'] / limits.usage['limit']) * 100
print(f"Usage: {percent_used:.1f}%")
if percent_used > 80:
print("⚠️ Warning: You are approaching your usage limit!")
except Exception as error:
print(f"Error checking usage: {error}")
check_usage()
```
### Streaming Workflow Execution
Execute workflows with real-time streaming responses:
```python
from simstudio import SimStudioClient
import os
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def execute_with_streaming():
"""Execute workflow with streaming enabled."""
try:
# Enable streaming for specific block outputs
result = client.execute_workflow(
"workflow-id",
input_data={"message": "Count to five"},
stream=True,
selected_outputs=["agent1.content"] # Use blockName.attribute format
)
print("Workflow result:", result)
except Exception as error:
print("Error:", error)
execute_with_streaming()
```
The streaming response follows the Server-Sent Events (SSE) format:
```
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":"One"}
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", two"}
data: {"event":"done","success":true,"output":{},"metadata":{"duration":610}}
data: [DONE]
```
**Flask Streaming Example:**
```python
from flask import Flask, Response, stream_with_context
import requests
import json
import os
app = Flask(__name__)
@app.route('/stream-workflow')
def stream_workflow():
"""Stream workflow execution to the client."""
def generate():
response = requests.post(
'https://sim.ai/api/workflows/WORKFLOW_ID/execute',
headers={
'Content-Type': 'application/json',
'X-API-Key': os.getenv('SIM_API_KEY')
},
json={
'message': 'Generate a story',
'stream': True,
'selectedOutputs': ['agent1.content']
},
stream=True
)
for line in response.iter_lines():
if line:
decoded_line = line.decode('utf-8')
if decoded_line.startswith('data: '):
data = decoded_line[6:] # Remove 'data: ' prefix
if data == '[DONE]':
break
try:
parsed = json.loads(data)
if 'chunk' in parsed:
yield f"data: {json.dumps(parsed)}\n\n"
elif parsed.get('event') == 'done':
yield f"data: {json.dumps(parsed)}\n\n"
print("Execution complete:", parsed.get('metadata'))
except json.JSONDecodeError:
pass
return Response(
stream_with_context(generate()),
mimetype='text/event-stream'
)
if __name__ == '__main__':
app.run(debug=True)
```
### Environment Configuration
Configure the client using environment variables:
@@ -351,8 +709,8 @@ Configure the client using environment variables:
# Development configuration
client = SimStudioClient(
api_key=os.getenv("SIMSTUDIO_API_KEY"),
base_url=os.getenv("SIMSTUDIO_BASE_URL", "https://sim.ai")
api_key=os.getenv("SIM_API_KEY")
base_url=os.getenv("SIM_BASE_URL", "https://sim.ai")
)
```
</Tab>
@@ -362,13 +720,13 @@ Configure the client using environment variables:
from simstudio import SimStudioClient
# Production configuration with error handling
api_key = os.getenv("SIMSTUDIO_API_KEY")
api_key = os.getenv("SIM_API_KEY")
if not api_key:
raise ValueError("SIMSTUDIO_API_KEY environment variable is required")
raise ValueError("SIM_API_KEY environment variable is required")
client = SimStudioClient(
api_key=api_key,
base_url=os.getenv("SIMSTUDIO_BASE_URL", "https://sim.ai")
base_url=os.getenv("SIM_BASE_URL", "https://sim.ai")
)
```
</Tab>

View File

@@ -7,10 +7,10 @@ import { Card, Cards } from 'fumadocs-ui/components/card'
import { Step, Steps } from 'fumadocs-ui/components/steps'
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
The official TypeScript/JavaScript SDK for Sim provides full type safety and supports both Node.js and browser environments, allowing you to execute workflows programmatically from your Node.js applications, web applications, and other JavaScript environments. All workflow executions are currently synchronous.
The official TypeScript/JavaScript SDK for Sim provides full type safety and supports both Node.js and browser environments, allowing you to execute workflows programmatically from your Node.js applications, web applications, and other JavaScript environments.
<Callout type="info">
The TypeScript SDK provides full type safety and supports both Node.js and browser environments. All workflow executions are currently synchronous.
The TypeScript SDK provides full type safety, async execution support, automatic rate limiting with exponential backoff, and usage tracking.
</Callout>
## Installation
@@ -89,8 +89,13 @@ const result = await client.executeWorkflow('workflow-id', {
- `options` (ExecutionOptions, optional):
- `input` (any): Input data to pass to the workflow
- `timeout` (number): Timeout in milliseconds (default: 30000)
- `stream` (boolean): Enable streaming responses (default: false)
- `selectedOutputs` (string[]): Block outputs to stream in `blockName.attribute` format (e.g., `["agent1.content"]`)
- `async` (boolean): Execute asynchronously (default: false)
**Returns:** `Promise<WorkflowExecutionResult>`
**Returns:** `Promise<WorkflowExecutionResult | AsyncExecutionResult>`
When `async: true`, returns immediately with a task ID for polling. Otherwise, waits for completion.
##### getWorkflowStatus()
@@ -122,28 +127,116 @@ if (isReady) {
**Returns:** `Promise<boolean>`
##### executeWorkflowSync()
##### getJobStatus()
<Callout type="info">
Currently, this method is identical to `executeWorkflow()` since all executions are synchronous. This method is provided for future compatibility when asynchronous execution is added.
</Callout>
Execute a workflow (currently synchronous, same as `executeWorkflow()`).
Get the status of an async job execution.
```typescript
const result = await client.executeWorkflowSync('workflow-id', {
input: { data: 'some input' },
timeout: 60000
const status = await client.getJobStatus('task-id-from-async-execution');
console.log('Status:', status.status); // 'queued', 'processing', 'completed', 'failed'
if (status.status === 'completed') {
console.log('Output:', status.output);
}
```
**Parameters:**
- `taskId` (string): The task ID returned from async execution
**Returns:** `Promise<JobStatus>`
**Response fields:**
- `success` (boolean): Whether the request was successful
- `taskId` (string): The task ID
- `status` (string): One of `'queued'`, `'processing'`, `'completed'`, `'failed'`, `'cancelled'`
- `metadata` (object): Contains `startedAt`, `completedAt`, and `duration`
- `output` (any, optional): The workflow output (when completed)
- `error` (any, optional): Error details (when failed)
- `estimatedDuration` (number, optional): Estimated duration in milliseconds (when processing/queued)
##### executeWithRetry()
Execute a workflow with automatic retry on rate limit errors using exponential backoff.
```typescript
const result = await client.executeWithRetry('workflow-id', {
input: { message: 'Hello' },
timeout: 30000
}, {
maxRetries: 3, // Maximum number of retries
initialDelay: 1000, // Initial delay in ms (1 second)
maxDelay: 30000, // Maximum delay in ms (30 seconds)
backoffMultiplier: 2 // Exponential backoff multiplier
});
```
**Parameters:**
- `workflowId` (string): The ID of the workflow to execute
- `options` (ExecutionOptions, optional):
- `input` (any): Input data to pass to the workflow
- `timeout` (number): Timeout for the initial request in milliseconds
- `options` (ExecutionOptions, optional): Same as `executeWorkflow()`
- `retryOptions` (RetryOptions, optional):
- `maxRetries` (number): Maximum number of retries (default: 3)
- `initialDelay` (number): Initial delay in ms (default: 1000)
- `maxDelay` (number): Maximum delay in ms (default: 30000)
- `backoffMultiplier` (number): Backoff multiplier (default: 2)
**Returns:** `Promise<WorkflowExecutionResult>`
**Returns:** `Promise<WorkflowExecutionResult | AsyncExecutionResult>`
The retry logic uses exponential backoff (1s → 2s → 4s → 8s...) with ±25% jitter to prevent thundering herd. If the API provides a `retry-after` header, it will be used instead.
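As an illustration of that schedule (a sketch only, not the SDK's internal code), the next delay could be computed like this:
```typescript
// Illustrative sketch of the delay schedule described above (not the SDK's internal implementation).
function nextDelay(
  attempt: number,            // 0-based retry attempt
  initialDelay = 1000,        // ms
  maxDelay = 30000,           // ms
  backoffMultiplier = 2,
  retryAfterMs?: number       // server-provided retry-after, if any
): number {
  if (retryAfterMs !== undefined) return retryAfterMs; // the API's hint takes precedence
  const base = Math.min(initialDelay * Math.pow(backoffMultiplier, attempt), maxDelay);
  const jitter = base * 0.25 * (Math.random() * 2 - 1); // ±25% jitter
  return Math.max(0, Math.round(base + jitter));
}

// attempt 0 ≈ 1s, attempt 1 ≈ 2s, attempt 2 ≈ 4s, ... capped at maxDelay
console.log([0, 1, 2, 3].map((attempt) => nextDelay(attempt)));
```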
##### getRateLimitInfo()
Get the current rate limit information from the last API response.
```typescript
const rateLimitInfo = client.getRateLimitInfo();
if (rateLimitInfo) {
console.log('Limit:', rateLimitInfo.limit);
console.log('Remaining:', rateLimitInfo.remaining);
console.log('Reset:', new Date(rateLimitInfo.reset * 1000));
}
```
**Returns:** `RateLimitInfo | null`
##### getUsageLimits()
Get current usage limits and quota information for your account.
```typescript
const limits = await client.getUsageLimits();
console.log('Sync requests remaining:', limits.rateLimit.sync.remaining);
console.log('Async requests remaining:', limits.rateLimit.async.remaining);
console.log('Current period cost:', limits.usage.currentPeriodCost);
console.log('Plan:', limits.usage.plan);
```
**Returns:** `Promise<UsageLimits>`
**Response structure:**
```typescript
{
success: boolean
rateLimit: {
sync: {
isLimited: boolean
limit: number
remaining: number
resetAt: string
}
async: {
isLimited: boolean
limit: number
remaining: number
resetAt: string
}
authType: string // 'api' or 'manual'
}
usage: {
currentPeriodCost: number
limit: number
plan: string // e.g., 'free', 'pro'
}
}
```
##### setApiKey()
@@ -181,6 +274,20 @@ interface WorkflowExecutionResult {
}
```
### AsyncExecutionResult
```typescript
interface AsyncExecutionResult {
success: boolean;
taskId: string;
status: 'queued';
createdAt: string;
links: {
status: string; // e.g., "/api/jobs/{taskId}"
};
}
```
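Because `executeWorkflow()` and `executeWithRetry()` return a union of this type and `WorkflowExecutionResult`, a small type guard (a sketch; not part of the SDK) can narrow it:
```typescript
import type { AsyncExecutionResult, WorkflowExecutionResult } from 'simstudio-ts-sdk';

// Hypothetical helper: narrow the union returned by executeWorkflow()/executeWithRetry().
function isAsyncResult(
  result: WorkflowExecutionResult | AsyncExecutionResult
): result is AsyncExecutionResult {
  return 'taskId' in result;
}

// Usage sketch:
// const result = await client.executeWorkflow('workflow-id', { async: true });
// if (isAsyncResult(result)) {
//   const status = await client.getJobStatus(result.taskId);
// }
```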
### WorkflowStatus
```typescript
@@ -192,6 +299,45 @@ interface WorkflowStatus {
}
```
### RateLimitInfo
```typescript
interface RateLimitInfo {
limit: number;
remaining: number;
reset: number;
retryAfter?: number;
}
```
### UsageLimits
```typescript
interface UsageLimits {
success: boolean;
rateLimit: {
sync: {
isLimited: boolean;
limit: number;
remaining: number;
resetAt: string;
};
async: {
isLimited: boolean;
limit: number;
remaining: number;
resetAt: string;
};
authType: string;
};
usage: {
currentPeriodCost: number;
limit: number;
plan: string;
};
}
```
### SimStudioError
```typescript
@@ -201,6 +347,13 @@ class SimStudioError extends Error {
}
```
**Common error codes:**
- `UNAUTHORIZED`: Invalid API key
- `TIMEOUT`: Request timed out
- `RATE_LIMIT_EXCEEDED`: Rate limit exceeded
- `USAGE_LIMIT_EXCEEDED`: Usage limit exceeded
- `EXECUTION_ERROR`: Workflow execution failed
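A compact way to branch on these codes (a sketch built only on the fields documented above; the recovery choices are illustrative):
```typescript
import { SimStudioClient, SimStudioError } from 'simstudio-ts-sdk';

const client = new SimStudioClient({ apiKey: process.env.SIM_API_KEY! });

// Sketch: map the documented error codes to a handling strategy.
async function runOnce(workflowId: string) {
  try {
    return await client.executeWorkflow(workflowId, { input: { message: 'Hello' } });
  } catch (error) {
    if (error instanceof SimStudioError) {
      switch (error.code) {
        case 'UNAUTHORIZED':
          throw new Error('Check the SIM_API_KEY environment variable');
        case 'TIMEOUT':
        case 'RATE_LIMIT_EXCEEDED':
          return null; // transient: retry later or use executeWithRetry()
        case 'USAGE_LIMIT_EXCEEDED':
          throw new Error('Usage limit reached; see getUsageLimits()');
        default:
          throw error; // EXECUTION_ERROR and anything unexpected
      }
    }
    throw error;
  }
}
```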
## Examples
### Basic Workflow Execution
@@ -224,7 +377,7 @@ class SimStudioError extends Error {
import { SimStudioClient } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
apiKey: process.env.SIMSTUDIO_API_KEY!
apiKey: process.env.SIM_API_KEY!
});
async function runWorkflow() {
@@ -265,7 +418,7 @@ Handle different types of errors that may occur during workflow execution:
import { SimStudioClient, SimStudioError } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
apiKey: process.env.SIMSTUDIO_API_KEY!
apiKey: process.env.SIM_API_KEY!
});
async function executeWithErrorHandling() {
@@ -308,14 +461,14 @@ Configure the client using environment variables:
import { SimStudioClient } from 'simstudio-ts-sdk';
// Development configuration
const apiKey = process.env.SIMSTUDIO_API_KEY;
const apiKey = process.env.SIM_API_KEY;
if (!apiKey) {
throw new Error('SIMSTUDIO_API_KEY environment variable is required');
throw new Error('SIM_API_KEY environment variable is required');
}
const client = new SimStudioClient({
apiKey,
baseUrl: process.env.SIMSTUDIO_BASE_URL // optional
baseUrl: process.env.SIM_BASE_URL // optional
});
```
</Tab>
@@ -324,14 +477,14 @@ Configure the client using environment variables:
import { SimStudioClient } from 'simstudio-ts-sdk';
// Production configuration with validation
const apiKey = process.env.SIMSTUDIO_API_KEY;
const apiKey = process.env.SIM_API_KEY;
if (!apiKey) {
throw new Error('SIMSTUDIO_API_KEY environment variable is required');
throw new Error('SIM_API_KEY environment variable is required');
}
const client = new SimStudioClient({
apiKey,
baseUrl: process.env.SIMSTUDIO_BASE_URL || 'https://sim.ai'
baseUrl: process.env.SIM_BASE_URL || 'https://sim.ai'
});
```
</Tab>
@@ -347,7 +500,7 @@ import { SimStudioClient } from 'simstudio-ts-sdk';
const app = express();
const client = new SimStudioClient({
apiKey: process.env.SIMSTUDIO_API_KEY!
apiKey: process.env.SIM_API_KEY!
});
app.use(express.json());
@@ -389,7 +542,7 @@ import { NextApiRequest, NextApiResponse } from 'next';
import { SimStudioClient } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
apiKey: process.env.SIMSTUDIO_API_KEY!
apiKey: process.env.SIM_API_KEY!
});
export default async function handler(
@@ -466,7 +619,7 @@ import { useState, useCallback } from 'react';
import { SimStudioClient, WorkflowExecutionResult } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
apiKey: process.env.NEXT_PUBLIC_SIMSTUDIO_API_KEY!
apiKey: process.env.SIM_API_KEY!
});
interface UseWorkflowResult {
@@ -522,7 +675,7 @@ function WorkflowComponent() {
<button onClick={handleExecute} disabled={loading}>
{loading ? 'Executing...' : 'Execute Workflow'}
</button>
{error && <div>Error: {error.message}</div>}
{result && (
<div>
@@ -535,6 +688,251 @@ function WorkflowComponent() {
}
```
### Async Workflow Execution
Execute workflows asynchronously for long-running tasks:
```typescript
import { SimStudioClient, AsyncExecutionResult } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
apiKey: process.env.SIM_API_KEY!
});
async function executeAsync() {
try {
// Start async execution
const result = await client.executeWorkflow('workflow-id', {
input: { data: 'large dataset' },
async: true // Execute asynchronously
});
// Check if result is an async execution
if ('taskId' in result) {
console.log('Task ID:', result.taskId);
console.log('Status endpoint:', result.links.status);
// Poll for completion
let status = await client.getJobStatus(result.taskId);
while (status.status === 'queued' || status.status === 'processing') {
console.log('Current status:', status.status);
await new Promise(resolve => setTimeout(resolve, 2000)); // Wait 2 seconds
status = await client.getJobStatus(result.taskId);
}
if (status.status === 'completed') {
console.log('Workflow completed!');
console.log('Output:', status.output);
console.log('Duration:', status.metadata.duration);
} else {
console.error('Workflow failed:', status.error);
}
}
} catch (error) {
console.error('Error:', error);
}
}
executeAsync();
```
### Rate Limiting and Retry
Handle rate limits automatically with exponential backoff:
```typescript
import { SimStudioClient, SimStudioError } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
apiKey: process.env.SIM_API_KEY!
});
async function executeWithRetryHandling() {
try {
// Automatically retries on rate limit
const result = await client.executeWithRetry('workflow-id', {
input: { message: 'Process this' }
}, {
maxRetries: 5,
initialDelay: 1000,
maxDelay: 60000,
backoffMultiplier: 2
});
console.log('Success:', result);
} catch (error) {
if (error instanceof SimStudioError && error.code === 'RATE_LIMIT_EXCEEDED') {
console.error('Rate limit exceeded after all retries');
// Check rate limit info
const rateLimitInfo = client.getRateLimitInfo();
if (rateLimitInfo) {
console.log('Rate limit resets at:', new Date(rateLimitInfo.reset * 1000));
}
}
}
}
```
### Usage Monitoring
Monitor your account usage and limits:
```typescript
import { SimStudioClient } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
apiKey: process.env.SIM_API_KEY!
});
async function checkUsage() {
try {
const limits = await client.getUsageLimits();
console.log('=== Rate Limits ===');
console.log('Sync requests:');
console.log(' Limit:', limits.rateLimit.sync.limit);
console.log(' Remaining:', limits.rateLimit.sync.remaining);
console.log(' Resets at:', limits.rateLimit.sync.resetAt);
console.log(' Is limited:', limits.rateLimit.sync.isLimited);
console.log('\nAsync requests:');
console.log(' Limit:', limits.rateLimit.async.limit);
console.log(' Remaining:', limits.rateLimit.async.remaining);
console.log(' Resets at:', limits.rateLimit.async.resetAt);
console.log(' Is limited:', limits.rateLimit.async.isLimited);
console.log('\n=== Usage ===');
console.log('Current period cost: $' + limits.usage.currentPeriodCost.toFixed(2));
console.log('Limit: $' + limits.usage.limit.toFixed(2));
console.log('Plan:', limits.usage.plan);
const percentUsed = (limits.usage.currentPeriodCost / limits.usage.limit) * 100;
console.log('Usage: ' + percentUsed.toFixed(1) + '%');
if (percentUsed > 80) {
console.warn('⚠️ Warning: You are approaching your usage limit!');
}
} catch (error) {
console.error('Error checking usage:', error);
}
}
checkUsage();
```
### Streaming Workflow Execution
Execute workflows with real-time streaming responses:
```typescript
import { SimStudioClient } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
apiKey: process.env.SIM_API_KEY!
});
async function executeWithStreaming() {
try {
// Enable streaming for specific block outputs
const result = await client.executeWorkflow('workflow-id', {
input: { message: 'Count to five' },
stream: true,
selectedOutputs: ['agent1.content'] // Use blockName.attribute format
});
console.log('Workflow result:', result);
} catch (error) {
console.error('Error:', error);
}
}
```
The streaming response follows the Server-Sent Events (SSE) format:
```
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":"One"}
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", two"}
data: {"event":"done","success":true,"output":{},"metadata":{"duration":610}}
data: [DONE]
```
**React Streaming Example:**
```typescript
import { useState, useEffect } from 'react';
function StreamingWorkflow() {
const [output, setOutput] = useState('');
const [loading, setLoading] = useState(false);
const executeStreaming = async () => {
setLoading(true);
setOutput('');
// IMPORTANT: Make this API call from your backend server, not the browser
// Never expose your API key in client-side code
const response = await fetch('https://sim.ai/api/workflows/WORKFLOW_ID/execute', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'X-API-Key': process.env.SIM_API_KEY! // Server-side environment variable only
},
body: JSON.stringify({
message: 'Generate a story',
stream: true,
selectedOutputs: ['agent1.content']
})
});
const reader = response.body?.getReader();
const decoder = new TextDecoder();
while (reader) {
const { done, value } = await reader.read();
if (done) break;
const chunk = decoder.decode(value);
const lines = chunk.split('\n\n');
for (const line of lines) {
if (line.startsWith('data: ')) {
const data = line.slice(6);
if (data === '[DONE]') {
setLoading(false);
break;
}
try {
const parsed = JSON.parse(data);
if (parsed.chunk) {
setOutput(prev => prev + parsed.chunk);
} else if (parsed.event === 'done') {
console.log('Execution complete:', parsed.metadata);
}
} catch (e) {
// Skip invalid JSON
}
}
}
}
};
return (
<div>
<button onClick={executeStreaming} disabled={loading}>
{loading ? 'Generating...' : 'Start Streaming'}
</button>
<div style={{ whiteSpace: 'pre-wrap' }}>{output}</div>
</div>
);
}
```
## Getting Your API Key
<Steps>
@@ -578,7 +976,7 @@ import {
// Type-safe client initialization
const client: SimStudioClient = new SimStudioClient({
apiKey: process.env.SIMSTUDIO_API_KEY!
apiKey: process.env.SIM_API_KEY!
});
// Type-safe workflow execution
@@ -594,4 +992,4 @@ const status: WorkflowStatus = await client.getWorkflowStatus('workflow-id');
## License
Apache-2.0
Apache-2.0

View File

@@ -38,6 +38,84 @@ curl -X POST \
Successful responses return the serialized execution result from the Executor. Errors surface validation, auth, or workflow failures.
## Streaming Responses
Enable real-time streaming to receive workflow output in chunks as it's generated. This is useful for displaying AI responses progressively to users.
### Request Parameters
Add these parameters to enable streaming:
- `stream` - Set to `true` to enable Server-Sent Events (SSE) streaming
- `selectedOutputs` - Array of block outputs to stream (e.g., `["agent1.content"]`)
### Block Output Format
Use the `blockName.attribute` format to specify which block outputs to stream:
- Format: `"blockName.attribute"` (e.g., to stream the content of the Agent 1 block, use `"agent1.content"`)
- Block names are case-insensitive and spaces are ignored
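A minimal sketch of that normalization (illustrative only; the exact rules are whatever the API applies server-side):
```typescript
// Sketch of the normalization implied above: lowercase the block name and drop spaces.
const toOutputRef = (blockName: string, attribute: string) =>
  `${blockName.toLowerCase().replace(/\s+/g, '')}.${attribute}`;

console.log(toOutputRef('Agent 1', 'content')); // "agent1.content"
```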
### Example Request
```bash
curl -X POST \
https://sim.ai/api/workflows/WORKFLOW_ID/execute \
-H 'Content-Type: application/json' \
-H 'X-API-Key: YOUR_KEY' \
-d '{
"message": "Count to five",
"stream": true,
"selectedOutputs": ["agent1.content"]
}'
```
### Response Format
Streaming responses use Server-Sent Events (SSE) format:
```
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":"One"}
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", two"}
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", three"}
data: {"event":"done","success":true,"output":{},"metadata":{"duration":610}}
data: [DONE]
```
Each event includes:
- **Streaming chunks**: `{"blockId": "...", "chunk": "text"}` - Real-time text as it's generated
- **Final event**: `{"event": "done", ...}` - Execution metadata and complete results
- **Terminator**: `[DONE]` - Signals end of stream
### Multiple Block Streaming
When `selectedOutputs` includes multiple blocks, each chunk indicates which block produced it:
```bash
curl -X POST \
https://sim.ai/api/workflows/WORKFLOW_ID/execute \
-H 'Content-Type: application/json' \
-H 'X-API-Key: YOUR_KEY' \
-d '{
"message": "Process this request",
"stream": true,
"selectedOutputs": ["agent1.content", "agent2.content"]
}'
```
The `blockId` field in each chunk lets you route output to the correct UI element:
```
data: {"blockId":"agent1-uuid","chunk":"Processing..."}
data: {"blockId":"agent2-uuid","chunk":"Analyzing..."}
data: {"blockId":"agent1-uuid","chunk":" complete"}
```
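For example, a consumer could keep one buffer per block (a TypeScript sketch for a server-side environment; `WORKFLOW_ID` is a placeholder and the API key must stay out of browser code):
```typescript
// Sketch: accumulate streamed text per block, keyed by the blockId of each SSE chunk.
async function streamPerBlock(): Promise<Record<string, string>> {
  const outputs = new Map<string, string>();

  const response = await fetch('https://sim.ai/api/workflows/WORKFLOW_ID/execute', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json', 'X-API-Key': process.env.SIM_API_KEY! },
    body: JSON.stringify({
      message: 'Process this request',
      stream: true,
      selectedOutputs: ['agent1.content', 'agent2.content'],
    }),
  });

  const reader = response.body!.getReader();
  const decoder = new TextDecoder();
  let buffer = '';

  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    buffer += decoder.decode(value, { stream: true });
    const events = buffer.split('\n\n');
    buffer = events.pop() ?? ''; // keep any partial event for the next read
    for (const event of events) {
      if (!event.startsWith('data: ')) continue;
      const data = event.slice(6);
      if (data === '[DONE]') continue;
      const parsed = JSON.parse(data);
      if (parsed.blockId && typeof parsed.chunk === 'string') {
        outputs.set(parsed.blockId, (outputs.get(parsed.blockId) ?? '') + parsed.chunk);
      }
    }
  }

  return Object.fromEntries(outputs); // e.g., { 'agent1-uuid': '...', 'agent2-uuid': '...' }
}
```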
## Output Reference
| Reference | Description |

View File

@@ -10,7 +10,7 @@ import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
El SDK oficial de Python para Sim te permite ejecutar flujos de trabajo programáticamente desde tus aplicaciones Python utilizando el SDK oficial de Python.
<Callout type="info">
El SDK de Python es compatible con Python 3.8+ y proporciona ejecución sincrónica de flujos de trabajo. Todas las ejecuciones de flujos de trabajo son actualmente sincrónicas.
El SDK de Python es compatible con Python 3.8+ con soporte para ejecución asíncrona, limitación automática de velocidad con retroceso exponencial y seguimiento de uso.
</Callout>
## Instalación
@@ -74,12 +74,17 @@ result = client.execute_workflow(
- `workflow_id` (str): El ID del flujo de trabajo a ejecutar
- `input_data` (dict, opcional): Datos de entrada para pasar al flujo de trabajo
- `timeout` (float, opcional): Tiempo de espera en segundos (predeterminado: 30.0)
- `stream` (bool, opcional): Habilitar respuestas en streaming (predeterminado: False)
- `selected_outputs` (list[str], opcional): Salidas de bloque para transmitir en formato `blockName.attribute` (p. ej., `["agent1.content"]`)
- `async_execution` (bool, opcional): Ejecutar de forma asíncrona (predeterminado: False)
**Devuelve:** `WorkflowExecutionResult`
**Devuelve:** `WorkflowExecutionResult | AsyncExecutionResult`
Cuando `async_execution=True`, devuelve inmediatamente un ID de tarea para sondeo. De lo contrario, espera a que se complete.
##### get_workflow_status()
Obtiene el estado de un flujo de trabajo (estado de implementación, etc.).
Obtener el estado de un flujo de trabajo (estado de implementación, etc.).
```python
status = client.get_workflow_status("workflow-id")
@@ -93,7 +98,7 @@ print("Is deployed:", status.is_deployed)
##### validate_workflow()
Valida que un flujo de trabajo esté listo para su ejecución.
Validar que un flujo de trabajo está listo para su ejecución.
```python
is_ready = client.validate_workflow("workflow-id")
@@ -107,28 +112,118 @@ if is_ready:
**Devuelve:** `bool`
##### execute_workflow_sync()
##### get_job_status()
<Callout type="info">
Actualmente, este método es idéntico a `execute_workflow()` ya que todas las ejecuciones son síncronas. Este método se proporciona para compatibilidad futura cuando se añada la ejecución asíncrona.
</Callout>
Ejecuta un flujo de trabajo (actualmente síncrono, igual que `execute_workflow()`).
Obtener el estado de una ejecución de trabajo asíncrono.
```python
result = client.execute_workflow_sync(
status = client.get_job_status("task-id-from-async-execution")
print("Status:", status["status"]) # 'queued', 'processing', 'completed', 'failed'
if status["status"] == "completed":
print("Output:", status["output"])
```
**Parámetros:**
- `task_id` (str): El ID de tarea devuelto de la ejecución asíncrona
**Devuelve:** `Dict[str, Any]`
**Campos de respuesta:**
- `success` (bool): Si la solicitud fue exitosa
- `taskId` (str): El ID de la tarea
- `status` (str): Uno de `'queued'`, `'processing'`, `'completed'`, `'failed'`, `'cancelled'`
- `metadata` (dict): Contiene `startedAt`, `completedAt`, y `duration`
- `output` (any, opcional): La salida del flujo de trabajo (cuando se completa)
- `error` (any, opcional): Detalles del error (cuando falla)
- `estimatedDuration` (int, opcional): Duración estimada en milisegundos (cuando está procesando/en cola)
##### execute_with_retry()
Ejecutar un flujo de trabajo con reintento automático en errores de límite de velocidad usando retroceso exponencial.
```python
result = client.execute_with_retry(
"workflow-id",
input_data={"data": "some input"},
timeout=60.0
input_data={"message": "Hello"},
timeout=30.0,
max_retries=3, # Maximum number of retries
initial_delay=1.0, # Initial delay in seconds
max_delay=30.0, # Maximum delay in seconds
backoff_multiplier=2.0 # Exponential backoff multiplier
)
```
**Parámetros:**
- `workflow_id` (str): El ID del flujo de trabajo a ejecutar
- `input_data` (dict, opcional): Datos de entrada para pasar al flujo de trabajo
- `timeout` (float): Tiempo de espera para la solicitud inicial en segundos
- `timeout` (float, opcional): Tiempo de espera en segundos
- `stream` (bool, opcional): Habilitar respuestas en streaming
- `selected_outputs` (list, opcional): Salidas de bloque para transmitir
- `async_execution` (bool, opcional): Ejecutar de forma asíncrona
- `max_retries` (int, opcional): Número máximo de reintentos (predeterminado: 3)
- `initial_delay` (float, opcional): Retraso inicial en segundos (predeterminado: 1.0)
- `max_delay` (float, opcional): Retraso máximo en segundos (predeterminado: 30.0)
- `backoff_multiplier` (float, opcional): Multiplicador de retroceso (predeterminado: 2.0)
**Devuelve:** `WorkflowExecutionResult`
**Devuelve:** `WorkflowExecutionResult | AsyncExecutionResult`
La lógica de reintento utiliza retroceso exponencial (1s → 2s → 4s → 8s...) con fluctuación de ±25% para evitar el efecto de manada. Si la API proporciona un encabezado `retry-after`, se utilizará en su lugar.
##### get_rate_limit_info()
Obtiene la información actual del límite de tasa de la última respuesta de la API.
```python
rate_limit_info = client.get_rate_limit_info()
if rate_limit_info:
print("Limit:", rate_limit_info.limit)
print("Remaining:", rate_limit_info.remaining)
print("Reset:", datetime.fromtimestamp(rate_limit_info.reset))
```
**Devuelve:** `RateLimitInfo | None`
##### get_usage_limits()
Obtiene los límites de uso actuales y la información de cuota para tu cuenta.
```python
limits = client.get_usage_limits()
print("Sync requests remaining:", limits.rate_limit["sync"]["remaining"])
print("Async requests remaining:", limits.rate_limit["async"]["remaining"])
print("Current period cost:", limits.usage["currentPeriodCost"])
print("Plan:", limits.usage["plan"])
```
**Devuelve:** `UsageLimits`
**Estructura de respuesta:**
```python
{
"success": bool,
"rateLimit": {
"sync": {
"isLimited": bool,
"limit": int,
"remaining": int,
"resetAt": str
},
"async": {
"isLimited": bool,
"limit": int,
"remaining": int,
"resetAt": str
},
"authType": str # 'api' or 'manual'
},
"usage": {
"currentPeriodCost": float,
"limit": float,
"plan": str # e.g., 'free', 'pro'
}
}
```
##### set_api_key()
@@ -170,6 +265,18 @@ class WorkflowExecutionResult:
total_duration: Optional[float] = None
```
### AsyncExecutionResult
```python
@dataclass
class AsyncExecutionResult:
success: bool
task_id: str
status: str # 'queued'
created_at: str
links: Dict[str, str] # e.g., {"status": "/api/jobs/{taskId}"}
```
### WorkflowStatus
```python
@@ -181,6 +288,27 @@ class WorkflowStatus:
needs_redeployment: bool = False
```
### RateLimitInfo
```python
@dataclass
class RateLimitInfo:
limit: int
remaining: int
reset: int
retry_after: Optional[int] = None
```
### UsageLimits
```python
@dataclass
class UsageLimits:
success: bool
rate_limit: Dict[str, Any]
usage: Dict[str, Any]
```
### SimStudioError
```python
@@ -191,6 +319,13 @@ class SimStudioError(Exception):
self.status = status
```
**Códigos de error comunes:**
- `UNAUTHORIZED`: Clave API inválida
- `TIMEOUT`: Tiempo de espera agotado
- `RATE_LIMIT_EXCEEDED`: Límite de tasa excedido
- `USAGE_LIMIT_EXCEEDED`: Límite de uso excedido
- `EXECUTION_ERROR`: Ejecución del flujo de trabajo fallida
## Ejemplos
### Ejecución básica de flujo de trabajo
@@ -205,8 +340,8 @@ class SimStudioError(Exception):
<Step title="Ejecutar el flujo de trabajo">
Ejecuta el flujo de trabajo con tus datos de entrada.
</Step>
<Step title="Gestionar el resultado">
Procesa el resultado de la ejecución y maneja cualquier error.
<Step title="Manejar el resultado">
Procesa el resultado de la ejecución y gestiona cualquier error.
</Step>
</Steps>
@@ -214,7 +349,7 @@ class SimStudioError(Exception):
import os
from simstudio import SimStudioClient
client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def run_workflow():
try:
@@ -252,7 +387,7 @@ Maneja diferentes tipos de errores que pueden ocurrir durante la ejecución del
from simstudio import SimStudioClient, SimStudioError
import os
client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def execute_with_error_handling():
try:
@@ -275,22 +410,22 @@ def execute_with_error_handling():
raise
```
### Uso del administrador de contexto
### Uso del gestor de contexto
Usa el cliente como un administrador de contexto para manejar automáticamente la limpieza de recursos:
Usa el cliente como un gestor de contexto para manejar automáticamente la limpieza de recursos:
```python
from simstudio import SimStudioClient
import os
# Using context manager to automatically close the session
with SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY")) as client:
with SimStudioClient(api_key=os.getenv("SIM_API_KEY")) as client:
result = client.execute_workflow("workflow-id")
print("Result:", result)
# Session is automatically closed here
```
### Ejecución por lotes de flujos de trabajo
### Ejecución de flujos de trabajo por lotes
Ejecuta múltiples flujos de trabajo de manera eficiente:
@@ -298,7 +433,7 @@ Ejecuta múltiples flujos de trabajo de manera eficiente:
from simstudio import SimStudioClient
import os
client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def execute_workflows_batch(workflow_data_pairs):
"""Execute multiple workflows with different input data."""
@@ -339,6 +474,230 @@ for result in results:
print(f"Workflow {result['workflow_id']}: {'Success' if result['success'] else 'Failed'}")
```
### Ejecución asíncrona de flujos de trabajo
Ejecuta flujos de trabajo de forma asíncrona para tareas de larga duración:
```python
import os
import time
from simstudio import SimStudioClient
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def execute_async():
try:
# Start async execution
result = client.execute_workflow(
"workflow-id",
input_data={"data": "large dataset"},
async_execution=True # Execute asynchronously
)
# Check if result is an async execution
if hasattr(result, 'task_id'):
print(f"Task ID: {result.task_id}")
print(f"Status endpoint: {result.links['status']}")
# Poll for completion
status = client.get_job_status(result.task_id)
while status["status"] in ["queued", "processing"]:
print(f"Current status: {status['status']}")
time.sleep(2) # Wait 2 seconds
status = client.get_job_status(result.task_id)
if status["status"] == "completed":
print("Workflow completed!")
print(f"Output: {status['output']}")
print(f"Duration: {status['metadata']['duration']}")
else:
print(f"Workflow failed: {status['error']}")
except Exception as error:
print(f"Error: {error}")
execute_async()
```
### Límite de tasa y reintentos
Maneja los límites de tasa automáticamente con retroceso exponencial:
```python
import os
from simstudio import SimStudioClient, SimStudioError
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def execute_with_retry_handling():
try:
# Automatically retries on rate limit
result = client.execute_with_retry(
"workflow-id",
input_data={"message": "Process this"},
max_retries=5,
initial_delay=1.0,
max_delay=60.0,
backoff_multiplier=2.0
)
print(f"Success: {result}")
except SimStudioError as error:
if error.code == "RATE_LIMIT_EXCEEDED":
print("Rate limit exceeded after all retries")
# Check rate limit info
rate_limit_info = client.get_rate_limit_info()
if rate_limit_info:
from datetime import datetime
reset_time = datetime.fromtimestamp(rate_limit_info.reset)
print(f"Rate limit resets at: {reset_time}")
execute_with_retry_handling()
```
### Monitoreo de uso
Monitorea el uso de tu cuenta y sus límites:
```python
import os
from simstudio import SimStudioClient
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def check_usage():
try:
limits = client.get_usage_limits()
print("=== Rate Limits ===")
print("Sync requests:")
print(f" Limit: {limits.rate_limit['sync']['limit']}")
print(f" Remaining: {limits.rate_limit['sync']['remaining']}")
print(f" Resets at: {limits.rate_limit['sync']['resetAt']}")
print(f" Is limited: {limits.rate_limit['sync']['isLimited']}")
print("\nAsync requests:")
print(f" Limit: {limits.rate_limit['async']['limit']}")
print(f" Remaining: {limits.rate_limit['async']['remaining']}")
print(f" Resets at: {limits.rate_limit['async']['resetAt']}")
print(f" Is limited: {limits.rate_limit['async']['isLimited']}")
print("\n=== Usage ===")
print(f"Current period cost: ${limits.usage['currentPeriodCost']:.2f}")
print(f"Limit: ${limits.usage['limit']:.2f}")
print(f"Plan: {limits.usage['plan']}")
percent_used = (limits.usage['currentPeriodCost'] / limits.usage['limit']) * 100
print(f"Usage: {percent_used:.1f}%")
if percent_used > 80:
print("⚠️ Warning: You are approaching your usage limit!")
except Exception as error:
print(f"Error checking usage: {error}")
check_usage()
```
### Ejecución de flujo de trabajo en streaming
Ejecuta flujos de trabajo con respuestas en tiempo real:
```python
from simstudio import SimStudioClient
import os
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def execute_with_streaming():
"""Execute workflow with streaming enabled."""
try:
# Enable streaming for specific block outputs
result = client.execute_workflow(
"workflow-id",
input_data={"message": "Count to five"},
stream=True,
selected_outputs=["agent1.content"] # Use blockName.attribute format
)
print("Workflow result:", result)
except Exception as error:
print("Error:", error)
execute_with_streaming()
```
La respuesta en streaming sigue el formato de Server-Sent Events (SSE):
```
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":"One"}
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", two"}
data: {"event":"done","success":true,"output":{},"metadata":{"duration":610}}
data: [DONE]
```
**Ejemplo de streaming con Flask:**
```python
from flask import Flask, Response, stream_with_context
import requests
import json
import os
app = Flask(__name__)
@app.route('/stream-workflow')
def stream_workflow():
"""Stream workflow execution to the client."""
def generate():
response = requests.post(
'https://sim.ai/api/workflows/WORKFLOW_ID/execute',
headers={
'Content-Type': 'application/json',
'X-API-Key': os.getenv('SIM_API_KEY')
},
json={
'message': 'Generate a story',
'stream': True,
'selectedOutputs': ['agent1.content']
},
stream=True
)
for line in response.iter_lines():
if line:
decoded_line = line.decode('utf-8')
if decoded_line.startswith('data: '):
data = decoded_line[6:] # Remove 'data: ' prefix
if data == '[DONE]':
break
try:
parsed = json.loads(data)
if 'chunk' in parsed:
yield f"data: {json.dumps(parsed)}\n\n"
elif parsed.get('event') == 'done':
yield f"data: {json.dumps(parsed)}\n\n"
print("Execution complete:", parsed.get('metadata'))
except json.JSONDecodeError:
pass
return Response(
stream_with_context(generate()),
mimetype='text/event-stream'
)
if __name__ == '__main__':
app.run(debug=True)
```
### Configuración del entorno
Configura el cliente usando variables de entorno:
@@ -352,8 +711,8 @@ Configura el cliente usando variables de entorno:
# Development configuration
client = SimStudioClient(
api_key=os.getenv("SIMSTUDIO_API_KEY"),
base_url=os.getenv("SIMSTUDIO_BASE_URL", "https://sim.ai")
api_key=os.getenv("SIM_API_KEY")
base_url=os.getenv("SIM_BASE_URL", "https://sim.ai")
)
```
@@ -365,13 +724,13 @@ Configura el cliente usando variables de entorno:
from simstudio import SimStudioClient
# Production configuration with error handling
api_key = os.getenv("SIMSTUDIO_API_KEY")
api_key = os.getenv("SIM_API_KEY")
if not api_key:
raise ValueError("SIMSTUDIO_API_KEY environment variable is required")
raise ValueError("SIM_API_KEY environment variable is required")
client = SimStudioClient(
api_key=api_key,
base_url=os.getenv("SIMSTUDIO_BASE_URL", "https://sim.ai")
base_url=os.getenv("SIM_BASE_URL", "https://sim.ai")
)
```

View File

@@ -7,10 +7,10 @@ import { Card, Cards } from 'fumadocs-ui/components/card'
import { Step, Steps } from 'fumadocs-ui/components/steps'
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
El SDK oficial de TypeScript/JavaScript para Sim proporciona seguridad de tipos completa y es compatible tanto con entornos Node.js como con navegadores, lo que te permite ejecutar flujos de trabajo de forma programática desde tus aplicaciones Node.js, aplicaciones web y otros entornos JavaScript. Todas las ejecuciones de flujos de trabajo son actualmente síncronas.
El SDK oficial de TypeScript/JavaScript para Sim proporciona seguridad de tipos completa y es compatible tanto con entornos Node.js como de navegador, lo que te permite ejecutar flujos de trabajo programáticamente desde tus aplicaciones Node.js, aplicaciones web y otros entornos JavaScript.
<Callout type="info">
El SDK de TypeScript proporciona seguridad de tipos completa y es compatible tanto con entornos Node.js como con navegadores. Todas las ejecuciones de flujos de trabajo son actualmente síncronas.
El SDK de TypeScript proporciona seguridad de tipos completa, soporte para ejecución asíncrona, limitación automática de velocidad con retroceso exponencial y seguimiento de uso.
</Callout>
## Instalación
@@ -95,8 +95,13 @@ const result = await client.executeWorkflow('workflow-id', {
- `options` (ExecutionOptions, opcional):
- `input` (any): Datos de entrada para pasar al flujo de trabajo
- `timeout` (number): Tiempo de espera en milisegundos (predeterminado: 30000)
- `stream` (boolean): Habilitar respuestas en streaming (predeterminado: false)
- `selectedOutputs` (string[]): Bloquear salidas para transmitir en formato `blockName.attribute` (por ejemplo, `["agent1.content"]`)
- `async` (boolean): Ejecutar de forma asíncrona (predeterminado: false)
**Devuelve:** `Promise<WorkflowExecutionResult>`
**Devuelve:** `Promise<WorkflowExecutionResult | AsyncExecutionResult>`
Cuando `async: true`, devuelve inmediatamente un ID de tarea para sondeo. De lo contrario, espera a que se complete.
##### getWorkflowStatus()
@@ -128,32 +133,121 @@ if (isReady) {
**Devuelve:** `Promise<boolean>`
##### executeWorkflowSync()
##### getJobStatus()
<Callout type="info">
Actualmente, este método es idéntico a `executeWorkflow()` ya que todas las ejecuciones son síncronas. Este método se proporciona para compatibilidad futura cuando se añada la ejecución asíncrona.
</Callout>
Ejecutar un flujo de trabajo (actualmente síncrono, igual que `executeWorkflow()`).
Obtener el estado de una ejecución de trabajo asíncrono.
```typescript
const result = await client.executeWorkflowSync('workflow-id', {
input: { data: 'some input' },
timeout: 60000
const status = await client.getJobStatus('task-id-from-async-execution');
console.log('Status:', status.status); // 'queued', 'processing', 'completed', 'failed'
if (status.status === 'completed') {
console.log('Output:', status.output);
}
```
**Parámetros:**
- `taskId` (string): El ID de tarea devuelto de la ejecución asíncrona
**Devuelve:** `Promise<JobStatus>`
**Campos de respuesta:**
- `success` (boolean): Si la solicitud fue exitosa
- `taskId` (string): El ID de la tarea
- `status` (string): Uno de `'queued'`, `'processing'`, `'completed'`, `'failed'`, `'cancelled'`
- `metadata` (object): Contiene `startedAt`, `completedAt`, y `duration`
- `output` (any, opcional): La salida del flujo de trabajo (cuando se completa)
- `error` (any, opcional): Detalles del error (cuando falla)
- `estimatedDuration` (number, opcional): Duración estimada en milisegundos (cuando está procesando/en cola)
##### executeWithRetry()
Ejecuta un flujo de trabajo con reintento automático en errores de límite de tasa utilizando retroceso exponencial.
```typescript
const result = await client.executeWithRetry('workflow-id', {
input: { message: 'Hello' },
timeout: 30000
}, {
maxRetries: 3, // Maximum number of retries
initialDelay: 1000, // Initial delay in ms (1 second)
maxDelay: 30000, // Maximum delay in ms (30 seconds)
backoffMultiplier: 2 // Exponential backoff multiplier
});
```
**Parámetros:**
- `workflowId` (string): El ID del flujo de trabajo a ejecutar
- `options` (ExecutionOptions, opcional):
- `input` (any): Datos de entrada para pasar al flujo de trabajo
- `timeout` (number): Tiempo de espera para la solicitud inicial en milisegundos
- `options` (ExecutionOptions, opcional): Igual que `executeWorkflow()`
- `retryOptions` (RetryOptions, opcional):
- `maxRetries` (number): Número máximo de reintentos (predeterminado: 3)
- `initialDelay` (number): Retraso inicial en ms (predeterminado: 1000)
- `maxDelay` (number): Retraso máximo en ms (predeterminado: 30000)
- `backoffMultiplier` (number): Multiplicador de retroceso (predeterminado: 2)
**Devuelve:** `Promise<WorkflowExecutionResult>`
**Devuelve:** `Promise<WorkflowExecutionResult | AsyncExecutionResult>`
La lógica de reintento utiliza retroceso exponencial (1s → 2s → 4s → 8s...) con fluctuación de ±25% para evitar el efecto de manada. Si la API proporciona una cabecera `retry-after`, se utilizará en su lugar.
##### getRateLimitInfo()
Obtiene la información actual del límite de tasa de la última respuesta de la API.
```typescript
const rateLimitInfo = client.getRateLimitInfo();
if (rateLimitInfo) {
console.log('Limit:', rateLimitInfo.limit);
console.log('Remaining:', rateLimitInfo.remaining);
console.log('Reset:', new Date(rateLimitInfo.reset * 1000));
}
```
**Devuelve:** `RateLimitInfo | null`
##### getUsageLimits()
Obtiene los límites de uso actuales y la información de cuota para tu cuenta.
```typescript
const limits = await client.getUsageLimits();
console.log('Sync requests remaining:', limits.rateLimit.sync.remaining);
console.log('Async requests remaining:', limits.rateLimit.async.remaining);
console.log('Current period cost:', limits.usage.currentPeriodCost);
console.log('Plan:', limits.usage.plan);
```
**Devuelve:** `Promise<UsageLimits>`
**Estructura de respuesta:**
```typescript
{
success: boolean
rateLimit: {
sync: {
isLimited: boolean
limit: number
remaining: number
resetAt: string
}
async: {
isLimited: boolean
limit: number
remaining: number
resetAt: string
}
authType: string // 'api' or 'manual'
}
usage: {
currentPeriodCost: number
limit: number
plan: string // e.g., 'free', 'pro'
}
}
```
##### setApiKey()
Actualizar la clave API.
Actualiza la clave API.
```typescript
client.setApiKey('new-api-key');
@@ -161,7 +255,7 @@ client.setApiKey('new-api-key');
##### setBaseUrl()
Actualizar la URL base.
Actualiza la URL base.
```typescript
client.setBaseUrl('https://my-custom-domain.com');
@@ -187,6 +281,20 @@ interface WorkflowExecutionResult {
}
```
### AsyncExecutionResult
```typescript
interface AsyncExecutionResult {
success: boolean;
taskId: string;
status: 'queued';
createdAt: string;
links: {
status: string; // e.g., "/api/jobs/{taskId}"
};
}
```
### WorkflowStatus
```typescript
@@ -198,6 +306,45 @@ interface WorkflowStatus {
}
```
### RateLimitInfo
```typescript
interface RateLimitInfo {
limit: number;
remaining: number;
reset: number;
retryAfter?: number;
}
```
### UsageLimits
```typescript
interface UsageLimits {
success: boolean;
rateLimit: {
sync: {
isLimited: boolean;
limit: number;
remaining: number;
resetAt: string;
};
async: {
isLimited: boolean;
limit: number;
remaining: number;
resetAt: string;
};
authType: string;
};
usage: {
currentPeriodCost: number;
limit: number;
plan: string;
};
}
```
### SimStudioError
```typescript
@@ -207,6 +354,13 @@ class SimStudioError extends Error {
}
```
**Códigos de error comunes:**
- `UNAUTHORIZED`: Clave API inválida
- `TIMEOUT`: Tiempo de espera agotado
- `RATE_LIMIT_EXCEEDED`: Límite de tasa excedido
- `USAGE_LIMIT_EXCEEDED`: Límite de uso excedido
- `EXECUTION_ERROR`: Ejecución del flujo de trabajo fallida
## Ejemplos
### Ejecución básica de flujo de trabajo
@@ -216,13 +370,13 @@ class SimStudioError extends Error {
Configura el SimStudioClient con tu clave API.
</Step>
<Step title="Validar el flujo de trabajo">
Comprueba si el flujo de trabajo está implementado y listo para su ejecución.
Comprueba si el flujo de trabajo está desplegado y listo para su ejecución.
</Step>
<Step title="Ejecutar el flujo de trabajo">
Ejecuta el flujo de trabajo con tus datos de entrada.
</Step>
<Step title="Gestionar el resultado">
Procesa el resultado de la ejecución y maneja cualquier error.
<Step title="Manejar el resultado">
Procesa el resultado de la ejecución y gestiona cualquier error.
</Step>
</Steps>
@@ -230,7 +384,7 @@ class SimStudioError extends Error {
import { SimStudioClient } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
apiKey: process.env.SIMSTUDIO_API_KEY!
apiKey: process.env.SIM_API_KEY!
});
async function runWorkflow() {
@@ -271,7 +425,7 @@ Maneja diferentes tipos de errores que pueden ocurrir durante la ejecución del
import { SimStudioClient, SimStudioError } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
apiKey: process.env.SIMSTUDIO_API_KEY!
apiKey: process.env.SIM_API_KEY!
});
async function executeWithErrorHandling() {
@@ -315,14 +469,14 @@ Configura el cliente usando variables de entorno:
import { SimStudioClient } from 'simstudio-ts-sdk';
// Development configuration
const apiKey = process.env.SIMSTUDIO_API_KEY;
const apiKey = process.env.SIM_API_KEY;
if (!apiKey) {
throw new Error('SIMSTUDIO_API_KEY environment variable is required');
throw new Error('SIM_API_KEY environment variable is required');
}
const client = new SimStudioClient({
apiKey,
baseUrl: process.env.SIMSTUDIO_BASE_URL // optional
baseUrl: process.env.SIM_BASE_URL // optional
});
```
@@ -333,14 +487,14 @@ Configura el cliente usando variables de entorno:
import { SimStudioClient } from 'simstudio-ts-sdk';
// Production configuration with validation
const apiKey = process.env.SIMSTUDIO_API_KEY;
const apiKey = process.env.SIM_API_KEY;
if (!apiKey) {
throw new Error('SIMSTUDIO_API_KEY environment variable is required');
throw new Error('SIM_API_KEY environment variable is required');
}
const client = new SimStudioClient({
apiKey,
baseUrl: process.env.SIMSTUDIO_BASE_URL || 'https://sim.ai'
baseUrl: process.env.SIM_BASE_URL || 'https://sim.ai'
});
```
@@ -349,7 +503,7 @@ Configura el cliente usando variables de entorno:
### Integración con Express de Node.js
Integración con un servidor Express.js:
Integra con un servidor Express.js:
```typescript
import express from 'express';
@@ -357,7 +511,7 @@ import { SimStudioClient } from 'simstudio-ts-sdk';
const app = express();
const client = new SimStudioClient({
apiKey: process.env.SIMSTUDIO_API_KEY!
apiKey: process.env.SIM_API_KEY!
});
app.use(express.json());
@@ -399,7 +553,7 @@ import { NextApiRequest, NextApiResponse } from 'next';
import { SimStudioClient } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
apiKey: process.env.SIMSTUDIO_API_KEY!
apiKey: process.env.SIM_API_KEY!
});
export default async function handler(
@@ -430,7 +584,7 @@ export default async function handler(
### Uso del navegador
Uso en el navegador (con la configuración CORS adecuada):
Uso en el navegador (con configuración CORS adecuada):
```typescript
import { SimStudioClient } from 'simstudio-ts-sdk';
@@ -469,14 +623,14 @@ document.getElementById('executeBtn')?.addEventListener('click', executeClientSi
### Ejemplo de hook de React
Crea un hook personalizado de React para la ejecución del flujo de trabajo:
Crea un hook personalizado de React para la ejecución de flujos de trabajo:
```typescript
import { useState, useCallback } from 'react';
import { SimStudioClient, WorkflowExecutionResult } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
apiKey: process.env.NEXT_PUBLIC_SIMSTUDIO_API_KEY!
apiKey: process.env.SIM_API_KEY!
});
interface UseWorkflowResult {
@@ -532,7 +686,7 @@ function WorkflowComponent() {
<button onClick={handleExecute} disabled={loading}>
{loading ? 'Executing...' : 'Execute Workflow'}
</button>
{error && <div>Error: {error.message}</div>}
{result && (
<div>
@@ -545,38 +699,267 @@ function WorkflowComponent() {
}
```
## Obtener tu clave API
### Ejecución asíncrona de flujos de trabajo
Ejecuta flujos de trabajo de forma asíncrona para tareas de larga duración:
```typescript
import { SimStudioClient, AsyncExecutionResult } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
apiKey: process.env.SIM_API_KEY!
});
async function executeAsync() {
try {
// Start async execution
const result = await client.executeWorkflow('workflow-id', {
input: { data: 'large dataset' },
async: true // Execute asynchronously
});
// Check if result is an async execution
if ('taskId' in result) {
console.log('Task ID:', result.taskId);
console.log('Status endpoint:', result.links.status);
// Poll for completion
let status = await client.getJobStatus(result.taskId);
while (status.status === 'queued' || status.status === 'processing') {
console.log('Current status:', status.status);
await new Promise(resolve => setTimeout(resolve, 2000)); // Wait 2 seconds
status = await client.getJobStatus(result.taskId);
}
if (status.status === 'completed') {
console.log('Workflow completed!');
console.log('Output:', status.output);
console.log('Duration:', status.metadata.duration);
} else {
console.error('Workflow failed:', status.error);
}
}
} catch (error) {
console.error('Error:', error);
}
}
executeAsync();
```
### Límite de tasa y reintentos
Maneja límites de tasa automáticamente con retroceso exponencial:
```typescript
import { SimStudioClient, SimStudioError } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
apiKey: process.env.SIM_API_KEY!
});
async function executeWithRetryHandling() {
try {
// Automatically retries on rate limit
const result = await client.executeWithRetry('workflow-id', {
input: { message: 'Process this' }
}, {
maxRetries: 5,
initialDelay: 1000,
maxDelay: 60000,
backoffMultiplier: 2
});
console.log('Success:', result);
} catch (error) {
if (error instanceof SimStudioError && error.code === 'RATE_LIMIT_EXCEEDED') {
console.error('Rate limit exceeded after all retries');
// Check rate limit info
const rateLimitInfo = client.getRateLimitInfo();
if (rateLimitInfo) {
console.log('Rate limit resets at:', new Date(rateLimitInfo.reset * 1000));
}
}
}
}
```
### Monitoreo de uso
Monitorea el uso de tu cuenta y sus límites:
```typescript
import { SimStudioClient } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
apiKey: process.env.SIM_API_KEY!
});
async function checkUsage() {
try {
const limits = await client.getUsageLimits();
console.log('=== Rate Limits ===');
console.log('Sync requests:');
console.log(' Limit:', limits.rateLimit.sync.limit);
console.log(' Remaining:', limits.rateLimit.sync.remaining);
console.log(' Resets at:', limits.rateLimit.sync.resetAt);
console.log(' Is limited:', limits.rateLimit.sync.isLimited);
console.log('\nAsync requests:');
console.log(' Limit:', limits.rateLimit.async.limit);
console.log(' Remaining:', limits.rateLimit.async.remaining);
console.log(' Resets at:', limits.rateLimit.async.resetAt);
console.log(' Is limited:', limits.rateLimit.async.isLimited);
console.log('\n=== Usage ===');
console.log('Current period cost: $' + limits.usage.currentPeriodCost.toFixed(2));
console.log('Limit: $' + limits.usage.limit.toFixed(2));
console.log('Plan:', limits.usage.plan);
const percentUsed = (limits.usage.currentPeriodCost / limits.usage.limit) * 100;
console.log('Usage: ' + percentUsed.toFixed(1) + '%');
if (percentUsed > 80) {
console.warn('⚠️ Warning: You are approaching your usage limit!');
}
} catch (error) {
console.error('Error checking usage:', error);
}
}
checkUsage();
```
### Streaming Workflow Execution
Execute workflows with real-time streaming responses:
```typescript
import { SimStudioClient } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
apiKey: process.env.SIM_API_KEY!
});
async function executeWithStreaming() {
try {
// Habilita streaming para salidas de bloques específicos
const result = await client.executeWorkflow('workflow-id', {
input: { message: 'Count to five' },
stream: true,
selectedOutputs: ['agent1.content'] // Usa el formato blockName.attribute
});
console.log('Resultado del flujo de trabajo:', result);
} catch (error) {
console.error('Error:', error);
}
}
```
The streaming response follows the Server-Sent Events (SSE) format:
```
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":"One"}
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", dos"}
data: {"event":"done","success":true,"output":{},"metadata":{"duration":610}}
data: [DONE]
```
**React Streaming Example:**
```typescript
import { useState, useEffect } from 'react';
function StreamingWorkflow() {
const [output, setOutput] = useState('');
const [loading, setLoading] = useState(false);
const executeStreaming = async () => {
setLoading(true);
setOutput('');
// IMPORTANT: Make this API call from your backend server, not the browser
// Never expose your API key in client-side code
const response = await fetch('https://sim.ai/api/workflows/WORKFLOW_ID/execute', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'X-API-Key': process.env.SIM_API_KEY! // Server-side environment variable only
},
body: JSON.stringify({
message: 'Generate a story',
stream: true,
selectedOutputs: ['agent1.content']
})
});
const reader = response.body?.getReader();
const decoder = new TextDecoder();
while (reader) {
const { done, value } = await reader.read();
if (done) break;
const chunk = decoder.decode(value);
const lines = chunk.split('\n\n');
for (const line of lines) {
if (line.startsWith('data: ')) {
const data = line.slice(6);
if (data === '[DONE]') {
setLoading(false);
break;
}
try {
const parsed = JSON.parse(data);
if (parsed.chunk) {
setOutput(prev => prev + parsed.chunk);
} else if (parsed.event === 'done') {
console.log('Execution complete:', parsed.metadata);
}
} catch (e) {
// Skip invalid JSON
}
}
}
}
};
return (
<div>
<button onClick={executeStreaming} disabled={loading}>
{loading ? 'Generando...' : 'Iniciar streaming'}
</button>
<div style={{ whiteSpace: 'pre-wrap' }}>{output}</div>
</div>
);
}
```
## Getting Your API Key
<Steps>
<Step title="Inicia sesión en Sim">
Navega a [Sim](https://sim.ai) e inicia sesión en tu cuenta.
<Step title="Log in to Sim">
Navigate to [Sim](https://sim.ai) and log in to your account.
</Step>
<Step title="Abre tu flujo de trabajo">
Navega al flujo de trabajo que quieres ejecutar programáticamente.
<Step title="Open your workflow">
Navigate to the workflow you want to execute programmatically.
</Step>
<Step title="Despliega tu flujo de trabajo">
Haz clic en "Deploy" para desplegar tu flujo de trabajo si aún no ha sido desplegado.
<Step title="Deploy your workflow">
Click on "Deploy" to deploy your workflow if it hasn't been deployed yet.
</Step>
<Step title="Crea o selecciona una clave API">
Durante el proceso de despliegue, selecciona o crea una clave API.
<Step title="Create or select an API key">
During the deployment process, select or create an API key.
</Step>
<Step title="Copia la clave API">
Copia la clave API para usarla en tu aplicación TypeScript/JavaScript.
<Step title="Copy the API key">
Copy the API key to use in your TypeScript/JavaScript application.
</Step>
</Steps>
<Callout type="warning">
Mantén tu clave API segura y nunca la incluyas en el control de versiones. Usa variables de entorno o gestión de configuración segura.
Keep your API key secure and never commit it to version control. Use environment variables or secure configuration management.
</Callout>
## Requisitos
## Requirements
- Node.js 16+
- TypeScript 5.0+ (para proyectos TypeScript)
- TypeScript 5.0+ (for TypeScript projects)
## Soporte para TypeScript
## TypeScript Support
El SDK está escrito en TypeScript y proporciona seguridad de tipos completa:
The SDK is written in TypeScript and provides full type safety:
```typescript
import {
@@ -588,13 +971,13 @@ import {
// Type-safe client initialization
const client: SimStudioClient = new SimStudioClient({
apiKey: process.env.SIMSTUDIO_API_KEY!
apiKey: process.env.SIM_API_KEY!
});
// Type-safe workflow execution
const result: WorkflowExecutionResult = await client.executeWorkflow('workflow-id', {
input: {
message: 'Hello, TypeScript!'
message: '¡Hola, TypeScript!'
}
});
@@ -602,6 +985,7 @@ const result: WorkflowExecutionResult = await client.executeWorkflow('workflow-i
const status: WorkflowStatus = await client.getWorkflowStatus('workflow-id');
```
## Licencia
## License
Apache-2.0
Apache-2.0

View File

@@ -38,15 +38,93 @@ curl -X POST \
Las respuestas exitosas devuelven el resultado de ejecución serializado del Ejecutor. Los errores muestran fallos de validación, autenticación o flujo de trabajo.
## Respuestas en streaming
Habilita el streaming en tiempo real para recibir la salida del flujo de trabajo a medida que se genera, carácter por carácter. Esto es útil para mostrar las respuestas de IA progresivamente a los usuarios.
### Parámetros de solicitud
Añade estos parámetros para habilitar el streaming:
- `stream` - Establece a `true` para habilitar el streaming de eventos enviados por el servidor (SSE)
- `selectedOutputs` - Array de salidas de bloques para transmitir (p. ej., `["agent1.content"]`)
### Formato de salida de bloque
Usa el formato `blockName.attribute` para especificar qué salidas de bloques transmitir:
- Formato: `"blockName.attribute"` (p. ej., si quieres transmitir el contenido del bloque Agente 1, usarías `"agent1.content"`)
- Los nombres de los bloques no distinguen entre mayúsculas y minúsculas y se ignoran los espacios
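For illustration, a tiny helper that applies these two rules when building `selectedOutputs` entries; the helper name is hypothetical and not part of the API:
```python
def output_reference(block_name: str, attribute: str) -> str:
    """Build a selectedOutputs entry: block names are case-insensitive and spaces are ignored."""
    return f"{block_name.replace(' ', '').lower()}.{attribute}"

print(output_reference("Agent 1", "content"))  # -> "agent1.content"
```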
### Ejemplo de solicitud
```bash
curl -X POST \
https://sim.ai/api/workflows/WORKFLOW_ID/execute \
-H 'Content-Type: application/json' \
-H 'X-API-Key: YOUR_KEY' \
-d '{
"message": "Count to five",
"stream": true,
"selectedOutputs": ["agent1.content"]
}'
```
### Formato de respuesta
Las respuestas en streaming utilizan el formato de eventos enviados por el servidor (SSE):
```
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":"One"}
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", two"}
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", three"}
data: {"event":"done","success":true,"output":{},"metadata":{"duration":610}}
data: [DONE]
```
Cada evento incluye:
- **Fragmentos de streaming**: `{"blockId": "...", "chunk": "text"}` - Texto en tiempo real a medida que se genera
- **Evento final**: `{"event": "done", ...}` - Metadatos de ejecución y resultados completos
- **Terminador**: `[DONE]` - Señala el fin del stream
### Streaming de múltiples bloques
Cuando `selectedOutputs` incluye múltiples bloques, cada fragmento indica qué bloque lo produjo:
```bash
curl -X POST \
https://sim.ai/api/workflows/WORKFLOW_ID/execute \
-H 'Content-Type: application/json' \
-H 'X-API-Key: YOUR_KEY' \
-d '{
"message": "Process this request",
"stream": true,
"selectedOutputs": ["agent1.content", "agent2.content"]
}'
```
El campo `blockId` en cada fragmento te permite dirigir la salida al elemento de UI correcto:
```
data: {"blockId":"agent1-uuid","chunk":"Processing..."}
data: {"blockId":"agent2-uuid","chunk":"Analyzing..."}
data: {"blockId":"agent1-uuid","chunk":" complete"}
```
## Referencia de salida
| Referencia | Descripción |
|-----------|-------------|
| `<api.field>` | Campo definido en el Formato de Entrada |
| `<api.input>` | Cuerpo completo estructurado de la solicitud |
| `<api.field>` | Campo definido en el formato de entrada |
| `<api.input>` | Cuerpo de solicitud estructurado completo |
Si no se define un Formato de Entrada, el ejecutor expone el JSON sin procesar solo en `<api.input>`.
Si no se define un formato de entrada, el ejecutor expone el JSON sin procesar solo en `<api.input>`.
<Callout type="warning">
Un flujo de trabajo puede contener solo un Disparador de API. Publica una nueva implementación después de realizar cambios para que el punto de conexión se mantenga actualizado.
Un flujo de trabajo puede contener solo un disparador de API. Publica una nueva implementación después de los cambios para que el endpoint se mantenga actualizado.
</Callout>


@@ -10,7 +10,7 @@ import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
Le SDK Python officiel pour Sim vous permet d'exécuter des workflows de manière programmatique à partir de vos applications Python en utilisant le SDK Python officiel.
<Callout type="info">
Le SDK Python prend en charge Python 3.8+ et fournit une exécution synchrone des workflows. Toutes les exécutions de workflow sont actuellement synchrones.
Le SDK Python prend en charge Python 3.8+ avec support d'exécution asynchrone, limitation automatique du débit avec backoff exponentiel, et suivi d'utilisation.
</Callout>
## Installation
@@ -71,11 +71,16 @@ result = client.execute_workflow(
```
**Paramètres :**
- `workflow_id` (str) : L'ID du workflow à exécuter
- `workflow_id` (str) : L'identifiant du workflow à exécuter
- `input_data` (dict, facultatif) : Données d'entrée à transmettre au workflow
- `timeout` (float, facultatif) : Délai d'attente en secondes (par défaut : 30.0)
- `timeout` (float, facultatif) : Délai d'expiration en secondes (par défaut : 30.0)
- `stream` (bool, facultatif) : Activer les réponses en streaming (par défaut : False)
- `selected_outputs` (list[str], facultatif) : Sorties de blocs à diffuser au format `blockName.attribute` (par exemple, `["agent1.content"]`)
- `async_execution` (bool, facultatif) : Exécuter de manière asynchrone (par défaut : False)
**Retourne :** `WorkflowExecutionResult`
**Retourne :** `WorkflowExecutionResult | AsyncExecutionResult`
Lorsque `async_execution=True`, retourne immédiatement un identifiant de tâche pour l'interrogation. Sinon, attend la fin de l'exécution.
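A minimal polling sketch of that flow, assuming the workflow is deployed and the placeholder IDs are replaced with real values (a fuller asynchronous example appears further down this page):
```python
import os
import time

from simstudio import SimStudioClient

client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))

# Start the run without waiting for it to finish ("workflow-id" is a placeholder).
job = client.execute_workflow(
    "workflow-id",
    input_data={"data": "..."},
    async_execution=True,
)

# Poll the returned task until it leaves the queued/processing states.
status = client.get_job_status(job.task_id)
while status["status"] in ("queued", "processing"):
    time.sleep(2)
    status = client.get_job_status(job.task_id)

print(status["status"], status.get("output"))
```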
##### get_workflow_status()
@@ -87,13 +92,13 @@ print("Is deployed:", status.is_deployed)
```
**Paramètres :**
- `workflow_id` (str) : L'ID du workflow
- `workflow_id` (str) : L'identifiant du workflow
**Retourne :** `WorkflowStatus`
##### validate_workflow()
Valide qu'un workflow est prêt pour l'exécution.
Valider qu'un workflow est prêt pour l'exécution.
```python
is_ready = client.validate_workflow("workflow-id")
@@ -107,32 +112,122 @@ if is_ready:
**Retourne :** `bool`
##### execute_workflow_sync()
##### get_job_status()
<Callout type="info">
Actuellement, cette méthode est identique à `execute_workflow()` puisque toutes les exécutions sont synchrones. Cette méthode est fournie pour une compatibilité future lorsque l'exécution asynchrone sera ajoutée.
</Callout>
Exécute un workflow (actuellement synchrone, identique à `execute_workflow()`).
Obtenir le statut d'une exécution de tâche asynchrone.
```python
result = client.execute_workflow_sync(
status = client.get_job_status("task-id-from-async-execution")
print("Status:", status["status"]) # 'queued', 'processing', 'completed', 'failed'
if status["status"] == "completed":
print("Output:", status["output"])
```
**Paramètres :**
- `task_id` (str) : L'identifiant de tâche retourné par l'exécution asynchrone
**Retourne :** `Dict[str, Any]`
**Champs de réponse :**
- `success` (bool) : Si la requête a réussi
- `taskId` (str) : L'identifiant de la tâche
- `status` (str) : L'un des états suivants : `'queued'`, `'processing'`, `'completed'`, `'failed'`, `'cancelled'`
- `metadata` (dict) : Contient `startedAt`, `completedAt`, et `duration`
- `output` (any, facultatif) : La sortie du workflow (une fois terminé)
- `error` (any, facultatif) : Détails de l'erreur (en cas d'échec)
- `estimatedDuration` (int, facultatif) : Durée estimée en millisecondes (lors du traitement/mise en file d'attente)
##### execute_with_retry()
Exécuter un workflow avec réessai automatique en cas d'erreurs de limitation de débit, en utilisant un backoff exponentiel.
```python
result = client.execute_with_retry(
"workflow-id",
input_data={"data": "some input"},
timeout=60.0
input_data={"message": "Hello"},
timeout=30.0,
max_retries=3, # Maximum number of retries
initial_delay=1.0, # Initial delay in seconds
max_delay=30.0, # Maximum delay in seconds
backoff_multiplier=2.0 # Exponential backoff multiplier
)
```
**Paramètres :**
- `workflow_id` (str) : L'identifiant du workflow à exécuter
- `input_data` (dict, facultatif) : Données d'entrée à transmettre au workflow
- `timeout` (float) : Délai d'attente pour la requête initiale en secondes
- `timeout` (float, facultatif) : Délai d'expiration en secondes
- `stream` (bool, facultatif) : Activer les réponses en streaming
- `selected_outputs` (list, facultatif) : Sorties de blocs à diffuser
- `async_execution` (bool, facultatif) : Exécuter de manière asynchrone
- `max_retries` (int, facultatif) : Nombre maximum de tentatives (par défaut : 3)
- `initial_delay` (float, facultatif) : Délai initial en secondes (par défaut : 1.0)
- `max_delay` (float, facultatif) : Délai maximum en secondes (par défaut : 30.0)
- `backoff_multiplier` (float, facultatif) : Multiplicateur de backoff (par défaut : 2.0)
**Retourne :** `WorkflowExecutionResult`
**Retourne :** `WorkflowExecutionResult | AsyncExecutionResult`
La logique de nouvelle tentative utilise un backoff exponentiel (1s → 2s → 4s → 8s...) avec une variation aléatoire de ±25% pour éviter l'effet de horde. Si l'API fournit un en-tête `retry-after`, celui-ci sera utilisé à la place.
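As an illustrative sketch of the delay schedule described above (this is not the SDK's internal implementation, and it assumes no `retry-after` header is present):
```python
import random

def retry_delay(attempt: int, initial_delay: float = 1.0,
                max_delay: float = 30.0, backoff_multiplier: float = 2.0) -> float:
    """Exponential backoff (1s -> 2s -> 4s ...) capped at max_delay, with +/-25% jitter."""
    base = min(initial_delay * (backoff_multiplier ** attempt), max_delay)
    jitter = base * random.uniform(-0.25, 0.25)
    return max(0.0, base + jitter)

# Example: delays for the first four retries.
for attempt in range(4):
    print(attempt, round(retry_delay(attempt), 2))
```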
##### get_rate_limit_info()
Obtenir les informations actuelles sur les limites de débit à partir de la dernière réponse de l'API.
```python
rate_limit_info = client.get_rate_limit_info()
if rate_limit_info:
print("Limit:", rate_limit_info.limit)
print("Remaining:", rate_limit_info.remaining)
print("Reset:", datetime.fromtimestamp(rate_limit_info.reset))
```
**Retourne :** `RateLimitInfo | None`
##### get_usage_limits()
Obtenir les limites d'utilisation actuelles et les informations de quota pour votre compte.
```python
limits = client.get_usage_limits()
print("Sync requests remaining:", limits.rate_limit["sync"]["remaining"])
print("Async requests remaining:", limits.rate_limit["async"]["remaining"])
print("Current period cost:", limits.usage["currentPeriodCost"])
print("Plan:", limits.usage["plan"])
```
**Retourne :** `UsageLimits`
**Structure de la réponse :**
```python
{
"success": bool,
"rateLimit": {
"sync": {
"isLimited": bool,
"limit": int,
"remaining": int,
"resetAt": str
},
"async": {
"isLimited": bool,
"limit": int,
"remaining": int,
"resetAt": str
},
"authType": str # 'api' or 'manual'
},
"usage": {
"currentPeriodCost": float,
"limit": float,
"plan": str # e.g., 'free', 'pro'
}
}
```
##### set_api_key()
Met à jour la clé API.
Mettre à jour la clé API.
```python
client.set_api_key("new-api-key")
@@ -140,7 +235,7 @@ client.set_api_key("new-api-key")
##### set_base_url()
Met à jour l'URL de base.
Mettre à jour l'URL de base.
```python
client.set_base_url("https://my-custom-domain.com")
@@ -148,7 +243,7 @@ client.set_base_url("https://my-custom-domain.com")
##### close()
Ferme la session HTTP sous-jacente.
Fermer la session HTTP sous-jacente.
```python
client.close()
@@ -170,6 +265,18 @@ class WorkflowExecutionResult:
total_duration: Optional[float] = None
```
### AsyncExecutionResult
```python
@dataclass
class AsyncExecutionResult:
success: bool
task_id: str
status: str # 'queued'
created_at: str
links: Dict[str, str] # e.g., {"status": "/api/jobs/{taskId}"}
```
### WorkflowStatus
```python
@@ -181,6 +288,27 @@ class WorkflowStatus:
needs_redeployment: bool = False
```
### RateLimitInfo
```python
@dataclass
class RateLimitInfo:
limit: int
remaining: int
reset: int
retry_after: Optional[int] = None
```
### UsageLimits
```python
@dataclass
class UsageLimits:
success: bool
rate_limit: Dict[str, Any]
usage: Dict[str, Any]
```
### SimStudioError
```python
@@ -191,19 +319,26 @@ class SimStudioError(Exception):
self.status = status
```
**Codes d'erreur courants :**
- `UNAUTHORIZED` : Clé API invalide
- `TIMEOUT` : Délai d'attente de la requête dépassé
- `RATE_LIMIT_EXCEEDED` : Limite de débit dépassée
- `USAGE_LIMIT_EXCEEDED` : Limite d'utilisation dépassée
- `EXECUTION_ERROR` : Échec de l'exécution du workflow
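A short illustrative sketch of branching on these codes via `SimStudioError` (the handling shown is an assumption, not prescribed by the SDK):
```python
import os

from simstudio import SimStudioClient, SimStudioError

client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))

try:
    result = client.execute_workflow("workflow-id")  # placeholder ID
except SimStudioError as error:
    if error.code == "UNAUTHORIZED":
        print("Check that SIM_API_KEY is set to a valid key")
    elif error.code == "RATE_LIMIT_EXCEEDED":
        print("Back off and retry, or use execute_with_retry()")
    elif error.code == "USAGE_LIMIT_EXCEEDED":
        print("The current billing period quota is exhausted")
    elif error.code == "TIMEOUT":
        print("Increase the timeout parameter or retry")
    else:
        print(f"Workflow execution failed: {error}")
```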
## Exemples
### Exécution de flux de travail basique
### Exécution basique d'un workflow
<Steps>
<Step title="Initialiser le client">
Configurez le SimStudioClient avec votre clé API.
</Step>
<Step title="Valider le flux de travail">
Vérifiez si le flux de travail est déployé et prêt pour l'exécution.
<Step title="Valider le workflow">
Vérifiez si le workflow est déployé et prêt pour l'exécution.
</Step>
<Step title="Exécuter le flux de travail">
Lancez le flux de travail avec vos données d'entrée.
<Step title="Exécuter le workflow">
Lancez le workflow avec vos données d'entrée.
</Step>
<Step title="Gérer le résultat">
Traitez le résultat de l'exécution et gérez les éventuelles erreurs.
@@ -214,7 +349,7 @@ class SimStudioError(Exception):
import os
from simstudio import SimStudioClient
client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def run_workflow():
try:
@@ -246,13 +381,13 @@ run_workflow()
### Gestion des erreurs
Gérez différents types d'erreurs qui peuvent survenir pendant l'exécution du flux de travail :
Gérez différents types d'erreurs qui peuvent survenir pendant l'exécution du workflow :
```python
from simstudio import SimStudioClient, SimStudioError
import os
client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def execute_with_error_handling():
try:
@@ -284,21 +419,21 @@ from simstudio import SimStudioClient
import os
# Using context manager to automatically close the session
with SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY")) as client:
with SimStudioClient(api_key=os.getenv("SIM_API_KEY")) as client:
result = client.execute_workflow("workflow-id")
print("Result:", result)
# Session is automatically closed here
```
### Exécution de flux de travail par lots
### Exécution de workflows par lots
Exécutez plusieurs flux de travail efficacement :
Exécutez plusieurs workflows efficacement :
```python
from simstudio import SimStudioClient
import os
client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def execute_workflows_batch(workflow_data_pairs):
"""Execute multiple workflows with different input data."""
@@ -339,6 +474,230 @@ for result in results:
print(f"Workflow {result['workflow_id']}: {'Success' if result['success'] else 'Failed'}")
```
### Exécution asynchrone de workflow
Exécutez des workflows de manière asynchrone pour les tâches de longue durée :
```python
import os
import time
from simstudio import SimStudioClient
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def execute_async():
try:
# Start async execution
result = client.execute_workflow(
"workflow-id",
input_data={"data": "large dataset"},
async_execution=True # Execute asynchronously
)
# Check if result is an async execution
if hasattr(result, 'task_id'):
print(f"Task ID: {result.task_id}")
print(f"Status endpoint: {result.links['status']}")
# Poll for completion
status = client.get_job_status(result.task_id)
while status["status"] in ["queued", "processing"]:
print(f"Current status: {status['status']}")
time.sleep(2) # Wait 2 seconds
status = client.get_job_status(result.task_id)
if status["status"] == "completed":
print("Workflow completed!")
print(f"Output: {status['output']}")
print(f"Duration: {status['metadata']['duration']}")
else:
print(f"Workflow failed: {status['error']}")
except Exception as error:
print(f"Error: {error}")
execute_async()
```
### Limitation de débit et nouvelle tentative
Gérez les limites de débit automatiquement avec un retrait exponentiel :
```python
import os
from simstudio import SimStudioClient, SimStudioError
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def execute_with_retry_handling():
try:
# Automatically retries on rate limit
result = client.execute_with_retry(
"workflow-id",
input_data={"message": "Process this"},
max_retries=5,
initial_delay=1.0,
max_delay=60.0,
backoff_multiplier=2.0
)
print(f"Success: {result}")
except SimStudioError as error:
if error.code == "RATE_LIMIT_EXCEEDED":
print("Rate limit exceeded after all retries")
# Check rate limit info
rate_limit_info = client.get_rate_limit_info()
if rate_limit_info:
from datetime import datetime
reset_time = datetime.fromtimestamp(rate_limit_info.reset)
print(f"Rate limit resets at: {reset_time}")
execute_with_retry_handling()
```
### Surveillance de l'utilisation
Surveillez l'utilisation et les limites de votre compte :
```python
import os
from simstudio import SimStudioClient
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def check_usage():
try:
limits = client.get_usage_limits()
print("=== Rate Limits ===")
print("Sync requests:")
print(f" Limit: {limits.rate_limit['sync']['limit']}")
print(f" Remaining: {limits.rate_limit['sync']['remaining']}")
print(f" Resets at: {limits.rate_limit['sync']['resetAt']}")
print(f" Is limited: {limits.rate_limit['sync']['isLimited']}")
print("\nAsync requests:")
print(f" Limit: {limits.rate_limit['async']['limit']}")
print(f" Remaining: {limits.rate_limit['async']['remaining']}")
print(f" Resets at: {limits.rate_limit['async']['resetAt']}")
print(f" Is limited: {limits.rate_limit['async']['isLimited']}")
print("\n=== Usage ===")
print(f"Current period cost: ${limits.usage['currentPeriodCost']:.2f}")
print(f"Limit: ${limits.usage['limit']:.2f}")
print(f"Plan: {limits.usage['plan']}")
percent_used = (limits.usage['currentPeriodCost'] / limits.usage['limit']) * 100
print(f"Usage: {percent_used:.1f}%")
if percent_used > 80:
print("⚠️ Warning: You are approaching your usage limit!")
except Exception as error:
print(f"Error checking usage: {error}")
check_usage()
```
### Exécution de workflow en streaming
Exécutez des workflows avec des réponses en streaming en temps réel :
```python
from simstudio import SimStudioClient
import os
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def execute_with_streaming():
"""Execute workflow with streaming enabled."""
try:
# Enable streaming for specific block outputs
result = client.execute_workflow(
"workflow-id",
input_data={"message": "Count to five"},
stream=True,
selected_outputs=["agent1.content"] # Use blockName.attribute format
)
print("Workflow result:", result)
except Exception as error:
print("Error:", error)
execute_with_streaming()
```
La réponse en streaming suit le format Server-Sent Events (SSE) :
```
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":"One"}
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", two"}
data: {"event":"done","success":true,"output":{},"metadata":{"duration":610}}
data: [DONE]
```
**Exemple de streaming avec Flask :**
```python
from flask import Flask, Response, stream_with_context
import requests
import json
import os
app = Flask(__name__)
@app.route('/stream-workflow')
def stream_workflow():
"""Stream workflow execution to the client."""
def generate():
response = requests.post(
'https://sim.ai/api/workflows/WORKFLOW_ID/execute',
headers={
'Content-Type': 'application/json',
'X-API-Key': os.getenv('SIM_API_KEY')
},
json={
'message': 'Generate a story',
'stream': True,
'selectedOutputs': ['agent1.content']
},
stream=True
)
for line in response.iter_lines():
if line:
decoded_line = line.decode('utf-8')
if decoded_line.startswith('data: '):
data = decoded_line[6:] # Remove 'data: ' prefix
if data == '[DONE]':
break
try:
parsed = json.loads(data)
if 'chunk' in parsed:
yield f"data: {json.dumps(parsed)}\n\n"
elif parsed.get('event') == 'done':
yield f"data: {json.dumps(parsed)}\n\n"
print("Execution complete:", parsed.get('metadata'))
except json.JSONDecodeError:
pass
return Response(
stream_with_context(generate()),
mimetype='text/event-stream'
)
if __name__ == '__main__':
app.run(debug=True)
```
### Configuration de l'environnement
Configurez le client en utilisant des variables d'environnement :
@@ -352,8 +711,8 @@ Configurez le client en utilisant des variables d'environnement :
# Development configuration
client = SimStudioClient(
api_key=os.getenv("SIMSTUDIO_API_KEY"),
base_url=os.getenv("SIMSTUDIO_BASE_URL", "https://sim.ai")
api_key=os.getenv("SIM_API_KEY")
base_url=os.getenv("SIM_BASE_URL", "https://sim.ai")
)
```
@@ -365,30 +724,30 @@ Configurez le client en utilisant des variables d'environnement :
from simstudio import SimStudioClient
# Production configuration with error handling
api_key = os.getenv("SIMSTUDIO_API_KEY")
api_key = os.getenv("SIM_API_KEY")
if not api_key:
raise ValueError("SIMSTUDIO_API_KEY environment variable is required")
raise ValueError("SIM_API_KEY environment variable is required")
client = SimStudioClient(
api_key=api_key,
base_url=os.getenv("SIMSTUDIO_BASE_URL", "https://sim.ai")
base_url=os.getenv("SIM_BASE_URL", "https://sim.ai")
)
```
</Tab>
</Tabs>
## Obtenir votre clé API
## Obtention de votre clé API
<Steps>
<Step title="Connectez-vous à Sim">
Accédez à [Sim](https://sim.ai) et connectez-vous à votre compte.
</Step>
<Step title="Ouvrez votre flux de travail">
Naviguez vers le flux de travail que vous souhaitez exécuter par programmation.
<Step title="Ouvrez votre workflow">
Accédez au workflow que vous souhaitez exécuter par programmation.
</Step>
<Step title="Déployez votre flux de travail">
Cliquez sur "Déployer" pour déployer votre flux de travail s'il n'a pas encore été déployé.
<Step title="Déployez votre workflow">
Cliquez sur "Déployer" pour déployer votre workflow s'il n'a pas encore été déployé.
</Step>
<Step title="Créez ou sélectionnez une clé API">
Pendant le processus de déploiement, sélectionnez ou créez une clé API.


@@ -7,10 +7,10 @@ import { Card, Cards } from 'fumadocs-ui/components/card'
import { Step, Steps } from 'fumadocs-ui/components/steps'
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
Le SDK officiel TypeScript/JavaScript pour Sim offre une sécurité de type complète et prend en charge les environnements Node.js et navigateur, vous permettant d'exécuter des workflows de manière programmatique depuis vos applications Node.js, applications web et autres environnements JavaScript. Toutes les exécutions de workflow sont actuellement synchrones.
Le SDK officiel TypeScript/JavaScript pour Sim offre une sécurité de type complète et prend en charge les environnements Node.js et navigateur, vous permettant d'exécuter des workflows par programmation depuis vos applications Node.js, applications web et autres environnements JavaScript.
<Callout type="info">
Le SDK TypeScript offre une sécurité de type complète et prend en charge les environnements Node.js et navigateur. Toutes les exécutions de workflow sont actuellement synchrones.
Le SDK TypeScript offre une sécurité de type complète, la prise en charge de l'exécution asynchrone, une limitation automatique du débit avec backoff exponentiel et le suivi d'utilisation.
</Callout>
## Installation
@@ -91,12 +91,17 @@ const result = await client.executeWorkflow('workflow-id', {
```
**Paramètres :**
- `workflowId` (string) : L'identifiant du workflow à exécuter
- `options` (ExecutionOptions, facultatif) :
- `workflowId` (string) : L'ID du workflow à exécuter
- `options` (ExecutionOptions, optionnel) :
- `input` (any) : Données d'entrée à transmettre au workflow
- `timeout` (number) : Délai d'expiration en millisecondes (par défaut : 30000)
- `stream` (boolean) : Activer les réponses en streaming (par défaut : false)
- `selectedOutputs` (string[]) : Bloquer les sorties à diffuser au format `blockName.attribute` (par exemple, `["agent1.content"]`)
- `async` (boolean) : Exécuter de manière asynchrone (par défaut : false)
**Retourne :** `Promise<WorkflowExecutionResult>`
**Retourne :** `Promise<WorkflowExecutionResult | AsyncExecutionResult>`
Lorsque `async: true`, retourne immédiatement avec un ID de tâche pour l'interrogation. Sinon, attend la fin de l'exécution.
##### getWorkflowStatus()
@@ -108,7 +113,7 @@ console.log('Is deployed:', status.isDeployed);
```
**Paramètres :**
- `workflowId` (string) : L'identifiant du workflow
- `workflowId` (string) : L'ID du workflow
**Retourne :** `Promise<WorkflowStatus>`
@@ -124,36 +129,125 @@ if (isReady) {
```
**Paramètres :**
- `workflowId` (string) : L'identifiant du workflow
- `workflowId` (string) : L'ID du workflow
**Retourne :** `Promise<boolean>`
##### executeWorkflowSync()
##### getJobStatus()
<Callout type="info">
Actuellement, cette méthode est identique à `executeWorkflow()` puisque toutes les exécutions sont synchrones. Cette méthode est fournie pour une compatibilité future lorsque l'exécution asynchrone sera ajoutée.
</Callout>
Exécuter un workflow (actuellement synchrone, identique à `executeWorkflow()`).
Obtenir le statut d'une exécution de tâche asynchrone.
```typescript
const result = await client.executeWorkflowSync('workflow-id', {
input: { data: 'some input' },
timeout: 60000
const status = await client.getJobStatus('task-id-from-async-execution');
console.log('Status:', status.status); // 'queued', 'processing', 'completed', 'failed'
if (status.status === 'completed') {
console.log('Output:', status.output);
}
```
**Paramètres :**
- `taskId` (string) : L'ID de tâche retourné par l'exécution asynchrone
**Retourne :** `Promise<JobStatus>`
**Champs de réponse :**
- `success` (boolean) : Indique si la requête a réussi
- `taskId` (string) : L'ID de la tâche
- `status` (string) : L'un des statuts suivants : `'queued'`, `'processing'`, `'completed'`, `'failed'`, `'cancelled'`
- `metadata` (object) : Contient `startedAt`, `completedAt`, et `duration`
- `output` (any, optionnel) : La sortie du workflow (une fois terminé)
- `error` (any, optionnel) : Détails de l'erreur (en cas d'échec)
- `estimatedDuration` (number, optionnel) : Durée estimée en millisecondes (lorsqu'en traitement/en file d'attente)
##### executeWithRetry()
Exécute un workflow avec une nouvelle tentative automatique en cas d'erreurs de limite de débit en utilisant un backoff exponentiel.
```typescript
const result = await client.executeWithRetry('workflow-id', {
input: { message: 'Hello' },
timeout: 30000
}, {
maxRetries: 3, // Maximum number of retries
initialDelay: 1000, // Initial delay in ms (1 second)
maxDelay: 30000, // Maximum delay in ms (30 seconds)
backoffMultiplier: 2 // Exponential backoff multiplier
});
```
**Paramètres :**
- `workflowId` (string) : L'identifiant du workflow à exécuter
- `options` (ExecutionOptions, facultatif) :
- `input` (any) : Données d'entrée à transmettre au workflow
- `timeout` (number) : Délai d'expiration pour la requête initiale en millisecondes
- `options` (ExecutionOptions, facultatif) : Identique à `executeWorkflow()`
- `retryOptions` (RetryOptions, facultatif) :
- `maxRetries` (number) : Nombre maximum de tentatives (par défaut : 3)
- `initialDelay` (number) : Délai initial en ms (par défaut : 1000)
- `maxDelay` (number) : Délai maximum en ms (par défaut : 30000)
- `backoffMultiplier` (number) : Multiplicateur de backoff (par défaut : 2)
**Retourne :** `Promise<WorkflowExecutionResult>`
**Retourne :** `Promise<WorkflowExecutionResult | AsyncExecutionResult>`
La logique de nouvelle tentative utilise un backoff exponentiel (1s → 2s → 4s → 8s...) avec une variation aléatoire de ±25 % pour éviter l'effet de rafale. Si l'API fournit un en-tête `retry-after`, celui-ci sera utilisé à la place.
##### getRateLimitInfo()
Obtient les informations actuelles sur les limites de débit à partir de la dernière réponse de l'API.
```typescript
const rateLimitInfo = client.getRateLimitInfo();
if (rateLimitInfo) {
console.log('Limit:', rateLimitInfo.limit);
console.log('Remaining:', rateLimitInfo.remaining);
console.log('Reset:', new Date(rateLimitInfo.reset * 1000));
}
```
**Retourne :** `RateLimitInfo | null`
##### getUsageLimits()
Obtient les limites d'utilisation actuelles et les informations de quota pour votre compte.
```typescript
const limits = await client.getUsageLimits();
console.log('Sync requests remaining:', limits.rateLimit.sync.remaining);
console.log('Async requests remaining:', limits.rateLimit.async.remaining);
console.log('Current period cost:', limits.usage.currentPeriodCost);
console.log('Plan:', limits.usage.plan);
```
**Retourne :** `Promise<UsageLimits>`
**Structure de la réponse :**
```typescript
{
success: boolean
rateLimit: {
sync: {
isLimited: boolean
limit: number
remaining: number
resetAt: string
}
async: {
isLimited: boolean
limit: number
remaining: number
resetAt: string
}
authType: string // 'api' or 'manual'
}
usage: {
currentPeriodCost: number
limit: number
plan: string // e.g., 'free', 'pro'
}
}
```
##### setApiKey()
Mettre à jour la clé API.
Met à jour la clé API.
```typescript
client.setApiKey('new-api-key');
@@ -161,7 +255,7 @@ client.setApiKey('new-api-key');
##### setBaseUrl()
Mettre à jour l'URL de base.
Met à jour l'URL de base.
```typescript
client.setBaseUrl('https://my-custom-domain.com');
@@ -187,6 +281,20 @@ interface WorkflowExecutionResult {
}
```
### AsyncExecutionResult
```typescript
interface AsyncExecutionResult {
success: boolean;
taskId: string;
status: 'queued';
createdAt: string;
links: {
status: string; // e.g., "/api/jobs/{taskId}"
};
}
```
### WorkflowStatus
```typescript
@@ -198,6 +306,45 @@ interface WorkflowStatus {
}
```
### RateLimitInfo
```typescript
interface RateLimitInfo {
limit: number;
remaining: number;
reset: number;
retryAfter?: number;
}
```
### UsageLimits
```typescript
interface UsageLimits {
success: boolean;
rateLimit: {
sync: {
isLimited: boolean;
limit: number;
remaining: number;
resetAt: string;
};
async: {
isLimited: boolean;
limit: number;
remaining: number;
resetAt: string;
};
authType: string;
};
usage: {
currentPeriodCost: number;
limit: number;
plan: string;
};
}
```
### SimStudioError
```typescript
@@ -207,9 +354,16 @@ class SimStudioError extends Error {
}
```
**Codes d'erreur courants :**
- `UNAUTHORIZED` : Clé API invalide
- `TIMEOUT` : Délai d'attente de la requête dépassé
- `RATE_LIMIT_EXCEEDED` : Limite de débit dépassée
- `USAGE_LIMIT_EXCEEDED` : Limite d'utilisation dépassée
- `EXECUTION_ERROR` : Échec de l'exécution du workflow
## Exemples
### Exécution de workflow basique
### Exécution basique d'un workflow
<Steps>
<Step title="Initialiser le client">
@@ -230,7 +384,7 @@ class SimStudioError extends Error {
import { SimStudioClient } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
apiKey: process.env.SIMSTUDIO_API_KEY!
apiKey: process.env.SIM_API_KEY!
});
async function runWorkflow() {
@@ -271,7 +425,7 @@ Gérez différents types d'erreurs qui peuvent survenir pendant l'exécution du
import { SimStudioClient, SimStudioError } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
apiKey: process.env.SIMSTUDIO_API_KEY!
apiKey: process.env.SIM_API_KEY!
});
async function executeWithErrorHandling() {
@@ -315,14 +469,14 @@ Configurez le client en utilisant des variables d'environnement :
import { SimStudioClient } from 'simstudio-ts-sdk';
// Development configuration
const apiKey = process.env.SIMSTUDIO_API_KEY;
const apiKey = process.env.SIM_API_KEY;
if (!apiKey) {
throw new Error('SIMSTUDIO_API_KEY environment variable is required');
throw new Error('SIM_API_KEY environment variable is required');
}
const client = new SimStudioClient({
apiKey,
baseUrl: process.env.SIMSTUDIO_BASE_URL // optional
baseUrl: process.env.SIM_BASE_URL // optional
});
```
@@ -333,14 +487,14 @@ Configurez le client en utilisant des variables d'environnement :
import { SimStudioClient } from 'simstudio-ts-sdk';
// Production configuration with validation
const apiKey = process.env.SIMSTUDIO_API_KEY;
const apiKey = process.env.SIM_API_KEY;
if (!apiKey) {
throw new Error('SIMSTUDIO_API_KEY environment variable is required');
throw new Error('SIM_API_KEY environment variable is required');
}
const client = new SimStudioClient({
apiKey,
baseUrl: process.env.SIMSTUDIO_BASE_URL || 'https://sim.ai'
baseUrl: process.env.SIM_BASE_URL || 'https://sim.ai'
});
```
@@ -357,7 +511,7 @@ import { SimStudioClient } from 'simstudio-ts-sdk';
const app = express();
const client = new SimStudioClient({
apiKey: process.env.SIMSTUDIO_API_KEY!
apiKey: process.env.SIM_API_KEY!
});
app.use(express.json());
@@ -399,7 +553,7 @@ import { NextApiRequest, NextApiResponse } from 'next';
import { SimStudioClient } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
apiKey: process.env.SIMSTUDIO_API_KEY!
apiKey: process.env.SIM_API_KEY!
});
export default async function handler(
@@ -469,14 +623,14 @@ document.getElementById('executeBtn')?.addEventListener('click', executeClientSi
### Exemple de hook React
Créez un hook React personnalisé pour l'exécution du workflow :
Créer un hook React personnalisé pour l'exécution de workflow :
```typescript
import { useState, useCallback } from 'react';
import { SimStudioClient, WorkflowExecutionResult } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
apiKey: process.env.NEXT_PUBLIC_SIMSTUDIO_API_KEY!
apiKey: process.env.SIM_API_KEY!
});
interface UseWorkflowResult {
@@ -532,7 +686,7 @@ function WorkflowComponent() {
<button onClick={handleExecute} disabled={loading}>
{loading ? 'Executing...' : 'Execute Workflow'}
</button>
{error && <div>Error: {error.message}</div>}
{result && (
<div>
@@ -545,38 +699,267 @@ function WorkflowComponent() {
}
```
## Obtenir votre clé API
### Exécution asynchrone de workflow
Exécuter des workflows de manière asynchrone pour les tâches de longue durée :
```typescript
import { SimStudioClient, AsyncExecutionResult } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
apiKey: process.env.SIM_API_KEY!
});
async function executeAsync() {
try {
// Start async execution
const result = await client.executeWorkflow('workflow-id', {
input: { data: 'large dataset' },
async: true // Execute asynchronously
});
// Check if result is an async execution
if ('taskId' in result) {
console.log('Task ID:', result.taskId);
console.log('Status endpoint:', result.links.status);
// Poll for completion
let status = await client.getJobStatus(result.taskId);
while (status.status === 'queued' || status.status === 'processing') {
console.log('Current status:', status.status);
await new Promise(resolve => setTimeout(resolve, 2000)); // Wait 2 seconds
status = await client.getJobStatus(result.taskId);
}
if (status.status === 'completed') {
console.log('Workflow completed!');
console.log('Output:', status.output);
console.log('Duration:', status.metadata.duration);
} else {
console.error('Workflow failed:', status.error);
}
}
} catch (error) {
console.error('Error:', error);
}
}
executeAsync();
```
### Limitation de débit et nouvelle tentative
Gérer automatiquement les limites de débit avec backoff exponentiel :
```typescript
import { SimStudioClient, SimStudioError } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
apiKey: process.env.SIM_API_KEY!
});
async function executeWithRetryHandling() {
try {
// Automatically retries on rate limit
const result = await client.executeWithRetry('workflow-id', {
input: { message: 'Process this' }
}, {
maxRetries: 5,
initialDelay: 1000,
maxDelay: 60000,
backoffMultiplier: 2
});
console.log('Success:', result);
} catch (error) {
if (error instanceof SimStudioError && error.code === 'RATE_LIMIT_EXCEEDED') {
console.error('Rate limit exceeded after all retries');
// Check rate limit info
const rateLimitInfo = client.getRateLimitInfo();
if (rateLimitInfo) {
console.log('Rate limit resets at:', new Date(rateLimitInfo.reset * 1000));
}
}
}
}
```
### Surveillance d'utilisation
Surveiller l'utilisation et les limites de votre compte :
```typescript
import { SimStudioClient } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
apiKey: process.env.SIM_API_KEY!
});
async function checkUsage() {
try {
const limits = await client.getUsageLimits();
console.log('=== Rate Limits ===');
console.log('Sync requests:');
console.log(' Limit:', limits.rateLimit.sync.limit);
console.log(' Remaining:', limits.rateLimit.sync.remaining);
console.log(' Resets at:', limits.rateLimit.sync.resetAt);
console.log(' Is limited:', limits.rateLimit.sync.isLimited);
console.log('\nAsync requests:');
console.log(' Limit:', limits.rateLimit.async.limit);
console.log(' Remaining:', limits.rateLimit.async.remaining);
console.log(' Resets at:', limits.rateLimit.async.resetAt);
console.log(' Is limited:', limits.rateLimit.async.isLimited);
console.log('\n=== Usage ===');
console.log('Current period cost:', limits.usage.currentPeriodCost);
console.log('Limit:', limits.usage.limit);
console.log('Plan:', limits.usage.plan);
const percentUsed = (limits.usage.currentPeriodCost / limits.usage.limit) * 100;
console.log(`Usage: ${percentUsed.toFixed(1)}%`);
if (percentUsed > 80) {
console.log('⚠️ Warning: You are approaching your usage limit!');
}
} catch (error) {
console.error('Error checking usage:', error);
}
}
checkUsage();
```
### Streaming Workflow Execution
Execute workflows with real-time streaming responses:
```typescript
import { SimStudioClient } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
apiKey: process.env.SIM_API_KEY!
});
async function executeWithStreaming() {
try {
// Activer le streaming pour des sorties de blocs spécifiques
const result = await client.executeWorkflow('workflow-id', {
input: { message: "Compter jusqu'à cinq" },
stream: true,
selectedOutputs: ['agent1.content'] // Utiliser le format blockName.attribute
});
console.log('Résultat du workflow :', result);
} catch (error) {
console.error('Erreur :', error);
}
}
```
The streaming response follows the Server-Sent Events (SSE) format:
```
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":"One"}
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", deux"}
data: {"event":"done","success":true,"output":{},"metadata":{"duration":610}}
data: [DONE]
```
**React Streaming Example:**
```typescript
import { useState, useEffect } from 'react';
function StreamingWorkflow() {
const [output, setOutput] = useState('');
const [loading, setLoading] = useState(false);
const executeStreaming = async () => {
setLoading(true);
setOutput('');
// IMPORTANT: Make this API call from your backend server, not the browser
// Never expose your API key in client-side code
const response = await fetch('https://sim.ai/api/workflows/WORKFLOW_ID/execute', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'X-API-Key': process.env.SIM_API_KEY! // Server-side environment variable only
},
body: JSON.stringify({
message: 'Generate a story',
stream: true,
selectedOutputs: ['agent1.content']
})
});
const reader = response.body?.getReader();
const decoder = new TextDecoder();
while (reader) {
const { done, value } = await reader.read();
if (done) break;
const chunk = decoder.decode(value);
const lines = chunk.split('\n\n');
for (const line of lines) {
if (line.startsWith('data: ')) {
const data = line.slice(6);
if (data === '[DONE]') {
setLoading(false);
break;
}
try {
const parsed = JSON.parse(data);
if (parsed.chunk) {
setOutput(prev => prev + parsed.chunk);
} else if (parsed.event === 'done') {
console.log('Execution complete:', parsed.metadata);
}
} catch (e) {
// Skip invalid JSON
}
}
}
}
};
return (
<div>
<button onClick={executeStreaming} disabled={loading}>
{loading ? 'Génération en cours...' : 'Démarrer le streaming'}
</button>
<div style={{ whiteSpace: 'pre-wrap' }}>{output}</div>
</div>
);
}
```
## Getting Your API Key
<Steps>
<Step title="Connectez-vous à Sim">
Accédez à [Sim](https://sim.ai) et connectez-vous à votre compte.
<Step title="Log in to Sim">
Navigate to [Sim](https://sim.ai) and log in to your account.
</Step>
<Step title="Ouvrez votre workflow">
Accédez au workflow que vous souhaitez exécuter par programmation.
<Step title="Open your workflow">
Navigate to the workflow you want to execute programmatically.
</Step>
<Step title="Déployez votre workflow">
Cliquez sur « Déployer » pour déployer votre workflow s'il n'a pas encore été déployé.
<Step title="Deploy your workflow">
Click on "Deploy" to deploy your workflow if it hasn't been deployed yet.
</Step>
<Step title="Créez ou sélectionnez une clé API">
Pendant le processus de déploiement, sélectionnez ou créez une clé API.
<Step title="Create or select an API key">
During the deployment process, select or create an API key.
</Step>
<Step title="Copiez la clé API">
Copiez la clé API à utiliser dans votre application TypeScript/JavaScript.
<Step title="Copy the API key">
Copy the API key to use in your TypeScript/JavaScript application.
</Step>
</Steps>
<Callout type="warning">
Gardez votre clé API en sécurité et ne la soumettez jamais au contrôle de version. Utilisez des variables d'environnement ou une gestion de configuration sécurisée.
Keep your API key secure and never commit it to version control. Use environment variables or secure configuration management.
</Callout>
## Prérequis
## Requirements
- Node.js 16+
- TypeScript 5.0+ (pour les projets TypeScript)
- TypeScript 5.0+ (for TypeScript projects)
## Support TypeScript
## TypeScript Support
Le SDK est écrit en TypeScript et offre une sécurité de type complète :
The SDK is written in TypeScript and provides full type safety:
```typescript
import {
@@ -588,13 +971,13 @@ import {
// Type-safe client initialization
const client: SimStudioClient = new SimStudioClient({
apiKey: process.env.SIMSTUDIO_API_KEY!
apiKey: process.env.SIM_API_KEY!
});
// Type-safe workflow execution
const result: WorkflowExecutionResult = await client.executeWorkflow('workflow-id', {
input: {
message: 'Hello, TypeScript!'
message: 'Bonjour, TypeScript !'
}
});
@@ -602,6 +985,7 @@ const result: WorkflowExecutionResult = await client.executeWorkflow('workflow-i
const status: WorkflowStatus = await client.getWorkflowStatus('workflow-id');
```
## Licence
## License
Apache-2.0
Apache-2.0


@@ -38,6 +38,84 @@ curl -X POST \
Les réponses réussies renvoient le résultat d'exécution sérialisé de l'exécuteur. Les erreurs révèlent des problèmes de validation, d'authentification ou d'échec du workflow.
## Réponses en streaming
Activez le streaming en temps réel pour recevoir les résultats du workflow au fur et à mesure qu'ils sont générés, caractère par caractère. Cela est utile pour afficher progressivement les réponses de l'IA aux utilisateurs.
### Paramètres de requête
Ajoutez ces paramètres pour activer le streaming :
- `stream` - Définissez à `true` pour activer le streaming Server-Sent Events (SSE)
- `selectedOutputs` - Tableau des sorties de blocs à diffuser en streaming (par exemple, `["agent1.content"]`)
### Format de sortie de bloc
Utilisez le format `blockName.attribute` pour spécifier quelles sorties de blocs diffuser en streaming :
- Format : `"blockName.attribute"` (par exemple, si vous souhaitez diffuser en streaming le contenu du bloc Agent 1, vous utiliseriez `"agent1.content"`)
- Les noms de blocs ne sont pas sensibles à la casse et les espaces sont ignorés
### Exemple de requête
```bash
curl -X POST \
https://sim.ai/api/workflows/WORKFLOW_ID/execute \
-H 'Content-Type: application/json' \
-H 'X-API-Key: YOUR_KEY' \
-d '{
"message": "Count to five",
"stream": true,
"selectedOutputs": ["agent1.content"]
}'
```
### Format de réponse
Les réponses en streaming utilisent le format Server-Sent Events (SSE) :
```
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":"One"}
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", two"}
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", three"}
data: {"event":"done","success":true,"output":{},"metadata":{"duration":610}}
data: [DONE]
```
Chaque événement comprend :
- **Fragments en streaming** : `{"blockId": "...", "chunk": "text"}` - Texte en temps réel au fur et à mesure qu'il est généré
- **Événement final** : `{"event": "done", ...}` - Métadonnées d'exécution et résultats complets
- **Terminateur** : `[DONE]` - Signale la fin du flux
### Streaming de plusieurs blocs
Lorsque `selectedOutputs` inclut plusieurs blocs, chaque fragment indique quel bloc l'a produit :
```bash
curl -X POST \
https://sim.ai/api/workflows/WORKFLOW_ID/execute \
-H 'Content-Type: application/json' \
-H 'X-API-Key: YOUR_KEY' \
-d '{
"message": "Process this request",
"stream": true,
"selectedOutputs": ["agent1.content", "agent2.content"]
}'
```
Le champ `blockId` dans chaque fragment vous permet d'acheminer la sortie vers l'élément d'interface utilisateur approprié :
```
data: {"blockId":"agent1-uuid","chunk":"Processing..."}
data: {"blockId":"agent2-uuid","chunk":"Analyzing..."}
data: {"blockId":"agent1-uuid","chunk":" complete"}
```
## Référence des sorties
| Référence | Description |


@@ -10,7 +10,7 @@ import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
Simの公式Python SDKを使用すると、公式Python SDKを使用してPythonアプリケーションからプログラムでワークフローを実行できます。
<Callout type="info">
Python SDKはPython 3.8以上をサポートし、同期的なワークフロー実行を提供します。現在、すべてのワークフロー実行は同期的です。
Python SDKはPython 3.8以上をサポートし、同期実行、指数バックオフによる自動レート制限、使用状況追跡機能を提供します。
</Callout>
## インストール
@@ -70,12 +70,17 @@ result = client.execute_workflow(
)
```
**パラメータ**
**パラメータ:**
- `workflow_id` (str): 実行するワークフローのID
- `input_data` (dict, オプション): ワークフローに渡す入力データ
- `timeout` (float, オプション): タイムアウト(秒)(デフォルト30.0
- `timeout` (float, オプション): タイムアウト(秒)(デフォルト: 30.0
- `stream` (bool, オプション): ストリーミングレスポンスを有効にする(デフォルト: False
- `selected_outputs` (list[str], オプション): `blockName.attribute`形式でストリーミングするブロック出力(例: `["agent1.content"]`
- `async_execution` (bool, オプション): 非同期実行(デフォルト: False
**戻り値** `WorkflowExecutionResult`
**戻り値:** `WorkflowExecutionResult | AsyncExecutionResult`
`async_execution=True`の場合、ポーリング用のタスクIDをすぐに返します。それ以外の場合は、完了を待ちます。
##### get_workflow_status()
@@ -86,7 +91,7 @@ status = client.get_workflow_status("workflow-id")
print("Is deployed:", status.is_deployed)
```
**パラメータ**
**パラメータ:**
- `workflow_id` (str): ワークフローのID
**戻り値:** `WorkflowStatus`
@@ -107,28 +112,118 @@ if is_ready:
**戻り値:** `bool`
##### execute_workflow_sync()
##### get_job_status()
<Callout type="info">
現在、このメソッドは `execute_workflow()` と同一です。すべての実行は同期的に行われるためです。このメソッドは、将来的に非同期実行が追加された際の互換性のために提供されています。
</Callout>
ワークフローを実行します(現在は同期的、`execute_workflow()` と同じ)。
非同期ジョブ実行のステータスを取得します。
```python
result = client.execute_workflow_sync(
status = client.get_job_status("task-id-from-async-execution")
print("Status:", status["status"]) # 'queued', 'processing', 'completed', 'failed'
if status["status"] == "completed":
print("Output:", status["output"])
```
**パラメータ:**
- `task_id` (str): 非同期実行から返されたタスクID
**戻り値:** `Dict[str, Any]`
**レスポンスフィールド:**
- `success` (bool): リクエストが成功したかどうか
- `taskId` (str): タスクID
- `status` (str): 次のいずれか: `'queued'`, `'processing'`, `'completed'`, `'failed'`, `'cancelled'`
- `metadata` (dict): `startedAt`, `completedAt`, `duration`を含む
- `output` (any, オプション): ワークフロー出力(完了時)
- `error` (any, オプション): エラー詳細(失敗時)
- `estimatedDuration` (int, オプション): 推定所要時間(ミリ秒)(処理中/キュー時)
##### execute_with_retry()
指数バックオフを使用してレート制限エラーで自動的に再試行するワークフロー実行。
```python
result = client.execute_with_retry(
"workflow-id",
input_data={"data": "some input"},
timeout=60.0
input_data={"message": "Hello"},
timeout=30.0,
max_retries=3, # Maximum number of retries
initial_delay=1.0, # Initial delay in seconds
max_delay=30.0, # Maximum delay in seconds
backoff_multiplier=2.0 # Exponential backoff multiplier
)
```
**パラメータ:**
- `workflow_id` (str): 実行するワークフローのID
- `input_data` (dict, optional): ワークフローに渡す入力データ
- `timeout` (float): 初期リクエストのタイムアウト(秒)
- `input_data` (dict, オプション): ワークフローに渡す入力データ
- `timeout` (float, オプション): タイムアウト(秒)
- `stream` (bool, オプション): ストリーミングレスポンスを有効にする
- `selected_outputs` (list, オプション): ストリーミングするブロック出力
- `async_execution` (bool, オプション): 非同期実行
- `max_retries` (int, オプション): 最大再試行回数(デフォルト: 3
- `initial_delay` (float, オプション): 初期遅延(秒)(デフォルト: 1.0
- `max_delay` (float, オプション): 最大遅延(秒)(デフォルト: 30.0
- `backoff_multiplier` (float, オプション): バックオフ乗数(デフォルト: 2.0
**戻り値:** `WorkflowExecutionResult`
**戻り値:** `WorkflowExecutionResult | AsyncExecutionResult`
リトライロジックは、サンダリングハード問題を防ぐために±25%のジッターを伴う指数バックオフ1秒→2秒→4秒→8秒...を使用します。APIが `retry-after` ヘッダーを提供する場合、代わりにそれが使用されます。
##### get_rate_limit_info()
最後のAPIレスポンスから現在のレート制限情報を取得します。
```python
rate_limit_info = client.get_rate_limit_info()
if rate_limit_info:
print("Limit:", rate_limit_info.limit)
print("Remaining:", rate_limit_info.remaining)
print("Reset:", datetime.fromtimestamp(rate_limit_info.reset))
```
**戻り値:** `RateLimitInfo | None`
##### get_usage_limits()
アカウントの現在の使用制限とクォータ情報を取得します。
```python
limits = client.get_usage_limits()
print("Sync requests remaining:", limits.rate_limit["sync"]["remaining"])
print("Async requests remaining:", limits.rate_limit["async"]["remaining"])
print("Current period cost:", limits.usage["currentPeriodCost"])
print("Plan:", limits.usage["plan"])
```
**戻り値:** `UsageLimits`
**レスポンス構造:**
```python
{
"success": bool,
"rateLimit": {
"sync": {
"isLimited": bool,
"limit": int,
"remaining": int,
"resetAt": str
},
"async": {
"isLimited": bool,
"limit": int,
"remaining": int,
"resetAt": str
},
"authType": str # 'api' or 'manual'
},
"usage": {
"currentPeriodCost": float,
"limit": float,
"plan": str # e.g., 'free', 'pro'
}
}
```
##### set_api_key()
@@ -170,6 +265,18 @@ class WorkflowExecutionResult:
total_duration: Optional[float] = None
```
### AsyncExecutionResult
```python
@dataclass
class AsyncExecutionResult:
success: bool
task_id: str
status: str # 'queued'
created_at: str
links: Dict[str, str] # e.g., {"status": "/api/jobs/{taskId}"}
```
### WorkflowStatus
```python
@@ -181,6 +288,27 @@ class WorkflowStatus:
needs_redeployment: bool = False
```
### RateLimitInfo
```python
@dataclass
class RateLimitInfo:
limit: int
remaining: int
reset: int
retry_after: Optional[int] = None
```
### UsageLimits
```python
@dataclass
class UsageLimits:
success: bool
rate_limit: Dict[str, Any]
usage: Dict[str, Any]
```
### SimStudioError
```python
@@ -191,6 +319,13 @@ class SimStudioError(Exception):
self.status = status
```
**一般的なエラーコード:**
- `UNAUTHORIZED`: 無効なAPIキー
- `TIMEOUT`: リクエストがタイムアウトしました
- `RATE_LIMIT_EXCEEDED`: レート制限を超えました
- `USAGE_LIMIT_EXCEEDED`: 使用制限を超えました
- `EXECUTION_ERROR`: ワークフローの実行に失敗しました
## 例
### 基本的なワークフロー実行
@@ -214,7 +349,7 @@ class SimStudioError(Exception):
import os
from simstudio import SimStudioClient
client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def run_workflow():
try:
@@ -252,7 +387,7 @@ run_workflow()
from simstudio import SimStudioClient, SimStudioError
import os
client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def execute_with_error_handling():
try:
@@ -277,14 +412,14 @@ def execute_with_error_handling():
### コンテキストマネージャーの使用
リソースのクリーンアップを自動的に処理するためにコンテキストマネージャーとしてクライアントを使用します:
リソースのクリーンアップを自動的に処理するためにクライアントをコンテキストマネージャーとして使用します:
```python
from simstudio import SimStudioClient
import os
# Using context manager to automatically close the session
with SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY")) as client:
with SimStudioClient(api_key=os.getenv("SIM_API_KEY")) as client:
result = client.execute_workflow("workflow-id")
print("Result:", result)
# Session is automatically closed here
@@ -298,7 +433,7 @@ with SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY")) as client:
from simstudio import SimStudioClient
import os
client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def execute_workflows_batch(workflow_data_pairs):
"""Execute multiple workflows with different input data."""
@@ -339,6 +474,230 @@ for result in results:
print(f"Workflow {result['workflow_id']}: {'Success' if result['success'] else 'Failed'}")
```
### 非同期ワークフロー実行
長時間実行されるタスクのためにワークフローを非同期で実行します:
```python
import os
import time
from simstudio import SimStudioClient
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def execute_async():
try:
# Start async execution
result = client.execute_workflow(
"workflow-id",
input_data={"data": "large dataset"},
async_execution=True # Execute asynchronously
)
# Check if result is an async execution
if hasattr(result, 'task_id'):
print(f"Task ID: {result.task_id}")
print(f"Status endpoint: {result.links['status']}")
# Poll for completion
status = client.get_job_status(result.task_id)
while status["status"] in ["queued", "processing"]:
print(f"Current status: {status['status']}")
time.sleep(2) # Wait 2 seconds
status = client.get_job_status(result.task_id)
if status["status"] == "completed":
print("Workflow completed!")
print(f"Output: {status['output']}")
print(f"Duration: {status['metadata']['duration']}")
else:
print(f"Workflow failed: {status['error']}")
except Exception as error:
print(f"Error: {error}")
execute_async()
```
### レート制限とリトライ
指数バックオフを使用して自動的にレート制限を処理します:
```python
import os
from simstudio import SimStudioClient, SimStudioError
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def execute_with_retry_handling():
try:
# Automatically retries on rate limit
result = client.execute_with_retry(
"workflow-id",
input_data={"message": "Process this"},
max_retries=5,
initial_delay=1.0,
max_delay=60.0,
backoff_multiplier=2.0
)
print(f"Success: {result}")
except SimStudioError as error:
if error.code == "RATE_LIMIT_EXCEEDED":
print("Rate limit exceeded after all retries")
# Check rate limit info
rate_limit_info = client.get_rate_limit_info()
if rate_limit_info:
from datetime import datetime
reset_time = datetime.fromtimestamp(rate_limit_info.reset)
print(f"Rate limit resets at: {reset_time}")
execute_with_retry_handling()
```
### 使用状況モニタリング
アカウントの使用状況と制限をモニタリングします:
```python
import os
from simstudio import SimStudioClient
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def check_usage():
try:
limits = client.get_usage_limits()
print("=== Rate Limits ===")
print("Sync requests:")
print(f" Limit: {limits.rate_limit['sync']['limit']}")
print(f" Remaining: {limits.rate_limit['sync']['remaining']}")
print(f" Resets at: {limits.rate_limit['sync']['resetAt']}")
print(f" Is limited: {limits.rate_limit['sync']['isLimited']}")
print("\nAsync requests:")
print(f" Limit: {limits.rate_limit['async']['limit']}")
print(f" Remaining: {limits.rate_limit['async']['remaining']}")
print(f" Resets at: {limits.rate_limit['async']['resetAt']}")
print(f" Is limited: {limits.rate_limit['async']['isLimited']}")
print("\n=== Usage ===")
print(f"Current period cost: ${limits.usage['currentPeriodCost']:.2f}")
print(f"Limit: ${limits.usage['limit']:.2f}")
print(f"Plan: {limits.usage['plan']}")
percent_used = (limits.usage['currentPeriodCost'] / limits.usage['limit']) * 100
print(f"Usage: {percent_used:.1f}%")
if percent_used > 80:
print("⚠️ Warning: You are approaching your usage limit!")
except Exception as error:
print(f"Error checking usage: {error}")
check_usage()
```
### ワークフローの実行ストリーミング
リアルタイムのストリーミングレスポンスでワークフローを実行します:
```python
from simstudio import SimStudioClient
import os
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def execute_with_streaming():
"""Execute workflow with streaming enabled."""
try:
# Enable streaming for specific block outputs
result = client.execute_workflow(
"workflow-id",
input_data={"message": "Count to five"},
stream=True,
selected_outputs=["agent1.content"] # Use blockName.attribute format
)
print("Workflow result:", result)
except Exception as error:
print("Error:", error)
execute_with_streaming()
```
ストリーミングレスポンスはServer-Sent EventsSSE形式に従います
```
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":"One"}
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", two"}
data: {"event":"done","success":true,"output":{},"metadata":{"duration":610}}
data: [DONE]
```
**Flaskストリーミングの例**
```python
from flask import Flask, Response, stream_with_context
import requests
import json
import os
app = Flask(__name__)
@app.route('/stream-workflow')
def stream_workflow():
"""Stream workflow execution to the client."""
def generate():
response = requests.post(
'https://sim.ai/api/workflows/WORKFLOW_ID/execute',
headers={
'Content-Type': 'application/json',
'X-API-Key': os.getenv('SIM_API_KEY')
},
json={
'message': 'Generate a story',
'stream': True,
'selectedOutputs': ['agent1.content']
},
stream=True
)
for line in response.iter_lines():
if line:
decoded_line = line.decode('utf-8')
if decoded_line.startswith('data: '):
data = decoded_line[6:] # Remove 'data: ' prefix
if data == '[DONE]':
break
try:
parsed = json.loads(data)
if 'chunk' in parsed:
yield f"data: {json.dumps(parsed)}\n\n"
elif parsed.get('event') == 'done':
yield f"data: {json.dumps(parsed)}\n\n"
print("Execution complete:", parsed.get('metadata'))
except json.JSONDecodeError:
pass
return Response(
stream_with_context(generate()),
mimetype='text/event-stream'
)
if __name__ == '__main__':
app.run(debug=True)
```
### 環境設定
環境変数を使用してクライアントを設定します:
@@ -352,8 +711,8 @@ for result in results:
# Development configuration
client = SimStudioClient(
api_key=os.getenv("SIMSTUDIO_API_KEY"),
base_url=os.getenv("SIMSTUDIO_BASE_URL", "https://sim.ai")
api_key=os.getenv("SIM_API_KEY")
base_url=os.getenv("SIM_BASE_URL", "https://sim.ai")
)
```
@@ -365,13 +724,13 @@ for result in results:
from simstudio import SimStudioClient
# Production configuration with error handling
api_key = os.getenv("SIMSTUDIO_API_KEY")
api_key = os.getenv("SIM_API_KEY")
if not api_key:
raise ValueError("SIMSTUDIO_API_KEY environment variable is required")
raise ValueError("SIM_API_KEY environment variable is required")
client = SimStudioClient(
api_key=api_key,
base_url=os.getenv("SIMSTUDIO_BASE_URL", "https://sim.ai")
base_url=os.getenv("SIM_BASE_URL", "https://sim.ai")
)
```


@@ -7,10 +7,10 @@ import { Card, Cards } from 'fumadocs-ui/components/card'
import { Step, Steps } from 'fumadocs-ui/components/steps'
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
公式TypeScript/JavaScript SDKはSimのために完全な型安全性を提供し、Node.jsとブラウザ環境の両方をサポートしています。これにより、Node.jsアプリケーション、Webアプリケーション、その他のJavaScript環境からプログラムワークフローを実行することができます。現在、すべてのワークフロー実行は同期的に行われます。
Sim用の公式TypeScript/JavaScript SDKは完全な型安全性を提供し、Node.jsとブラウザ環境の両方をサポートしています。これにより、Node.jsアプリケーション、Webアプリケーション、その他のJavaScript環境からプログラムによってワークフローを実行することができます。
<Callout type="info">
TypeScript SDKは完全な型安全性を提供し、Node.jsとブラウザ環境の両方をサポートしています。現在、すべてのワークフロー実行は同期的に行われます。
TypeScript SDKは完全な型安全性、非同期実行サポート、指数バックオフによる自動レート制限、使用状況追跡を提供します。
</Callout>
## インストール
@@ -95,8 +95,13 @@ const result = await client.executeWorkflow('workflow-id', {
- `options` (ExecutionOptions, オプション):
- `input` (any): ワークフローに渡す入力データ
- `timeout` (number): タイムアウト(ミリ秒)(デフォルト: 30000
- `stream` (boolean): ストリーミングレスポンスを有効にする(デフォルト: false
- `selectedOutputs` (string[]): `blockName.attribute`形式でストリーミングするブロック出力(例: `["agent1.content"]`
- `async` (boolean): 非同期実行(デフォルト: false
**戻り値:** `Promise<WorkflowExecutionResult>`
**戻り値:** `Promise<WorkflowExecutionResult | AsyncExecutionResult>`
`async: true`の場合、ポーリング用のタスクIDをすぐに返します。それ以外の場合は、完了を待ちます。
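When `async: true` is set, the usual pattern is to poll `getJobStatus()` with the returned task ID. A condensed sketch follows (it assumes the `client` initialized earlier on this page; the full "Async Workflow Execution" example later shows the same flow with more logging):

```typescript
// Condensed polling sketch; mirrors the async example later on this page.
async function runAsyncJob() {
  const started = await client.executeWorkflow('workflow-id', {
    input: { data: 'large dataset' },
    async: true
  });

  if ('taskId' in started) {
    let job = await client.getJobStatus(started.taskId);
    while (job.status === 'queued' || job.status === 'processing') {
      await new Promise((resolve) => setTimeout(resolve, 2000)); // poll every 2s
      job = await client.getJobStatus(started.taskId);
    }
    console.log(job.status === 'completed' ? job.output : job.error);
  }
}
```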
##### getWorkflowStatus()
@@ -114,7 +119,7 @@ console.log('Is deployed:', status.isDeployed);
##### validateWorkflow()
ワークフローが実行準備ができているか検証します。
ワークフローが実行準備ができているか検証します。
```typescript
const isReady = await client.validateWorkflow('workflow-id');
@@ -128,28 +133,117 @@ if (isReady) {
**戻り値:** `Promise<boolean>`
##### executeWorkflowSync()
##### getJobStatus()
<Callout type="info">
現在、このメソッドは `executeWorkflow()` と同一です。すべての実行は同期的に行われるためです。このメソッドは、将来的に非同期実行が追加された際の互換性のために提供されています。
</Callout>
ワークフローを実行します(現在は同期的、`executeWorkflow()` と同じ)。
非同期ジョブ実行のステータスを取得します。
```typescript
const result = await client.executeWorkflowSync('workflow-id', {
input: { data: 'some input' },
timeout: 60000
const status = await client.getJobStatus('task-id-from-async-execution');
console.log('Status:', status.status); // 'queued', 'processing', 'completed', 'failed'
if (status.status === 'completed') {
console.log('Output:', status.output);
}
```
**パラメータ:**
- `taskId` (string): 非同期実行から返されたタスクID
**戻り値:** `Promise<JobStatus>`
**レスポンスフィールド:**
- `success` (boolean): リクエストが成功したかどうか
- `taskId` (string): タスクID
- `status` (string): 次のいずれか `'queued'`, `'processing'`, `'completed'`, `'failed'`, `'cancelled'`
- `metadata` (object): `startedAt`, `completedAt`, および `duration` を含む
- `output` (any, オプション): ワークフロー出力(完了時)
- `error` (any, オプション): エラー詳細(失敗時)
- `estimatedDuration` (number, オプション): 推定所要時間(ミリ秒)(処理中/キュー時)
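As a rough guide, the fields above correspond to a shape like the following. This is an approximation inferred from the list, not the SDK's exact type declaration:

```typescript
// Approximate JobStatus shape inferred from the field list above.
interface JobStatus {
  success: boolean;
  taskId: string;
  status: 'queued' | 'processing' | 'completed' | 'failed' | 'cancelled';
  metadata: {
    startedAt: string;
    completedAt?: string;
    duration?: number; // ms
  };
  output?: any;               // present when completed
  error?: any;                // present when failed
  estimatedDuration?: number; // ms, while queued/processing
}
```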
##### executeWithRetry()
レート制限エラー時に指数バックオフを使用して自動的に再試行するワークフロー実行。
```typescript
const result = await client.executeWithRetry('workflow-id', {
input: { message: 'Hello' },
timeout: 30000
}, {
maxRetries: 3, // Maximum number of retries
initialDelay: 1000, // Initial delay in ms (1 second)
maxDelay: 30000, // Maximum delay in ms (30 seconds)
backoffMultiplier: 2 // Exponential backoff multiplier
});
```
**パラメータ:**
- `workflowId` (string): 実行するワークフローのID
- `options` (ExecutionOptions, オプション):
- `input` (any): ワークフローに渡す入力データ
- `timeout` (number): 初期リクエストのタイムアウト(ミリ秒
- `options` (ExecutionOptions, オプション): `executeWorkflow()`と同じ
- `retryOptions` (RetryOptions, オプション):
- `maxRetries` (number): 最大再試行回数(デフォルト: 3
- `initialDelay` (number): 初期遅延(ミリ秒)(デフォルト: 1000
- `maxDelay` (number): 最大遅延(ミリ秒)(デフォルト: 30000
- `backoffMultiplier` (number): バックオフ乗数(デフォルト: 2
**戻り値:** `Promise<WorkflowExecutionResult>`
**戻り値:** `Promise<WorkflowExecutionResult | AsyncExecutionResult>`
再試行ロジックは、サンダリングハード問題を防ぐために±25%のジッターを含む指数バックオフ1秒→2秒→4秒→8秒...を使用します。APIが`retry-after`ヘッダーを提供する場合、代わりにそれが使用されます。
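The delay schedule can be pictured with a small helper like this (illustrative only; the SDK's internal retry code is not reproduced here, and `retryAfterSeconds` stands in for a `retry-after` header when one is present):

```typescript
// Illustrative sketch of the backoff schedule described above.
function nextDelayMs(
  attempt: number,            // 0-based retry attempt
  initialDelay = 1000,
  maxDelay = 30000,
  backoffMultiplier = 2,
  retryAfterSeconds?: number
): number {
  if (retryAfterSeconds !== undefined) return retryAfterSeconds * 1000; // header wins
  const base = Math.min(initialDelay * backoffMultiplier ** attempt, maxDelay);
  const jitter = (Math.random() * 0.5 - 0.25) * base; // ±25% jitter
  return Math.max(0, Math.round(base + jitter));
}
```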
##### getRateLimitInfo()
最後のAPIレスポンスから現在のレート制限情報を取得します。
```typescript
const rateLimitInfo = client.getRateLimitInfo();
if (rateLimitInfo) {
console.log('Limit:', rateLimitInfo.limit);
console.log('Remaining:', rateLimitInfo.remaining);
console.log('Reset:', new Date(rateLimitInfo.reset * 1000));
}
```
**戻り値:** `RateLimitInfo | null`
##### getUsageLimits()
アカウントの現在の使用制限とクォータ情報を取得します。
```typescript
const limits = await client.getUsageLimits();
console.log('Sync requests remaining:', limits.rateLimit.sync.remaining);
console.log('Async requests remaining:', limits.rateLimit.async.remaining);
console.log('Current period cost:', limits.usage.currentPeriodCost);
console.log('Plan:', limits.usage.plan);
```
**戻り値:** `Promise<UsageLimits>`
**レスポンス構造:**
```typescript
{
success: boolean
rateLimit: {
sync: {
isLimited: boolean
limit: number
remaining: number
resetAt: string
}
async: {
isLimited: boolean
limit: number
remaining: number
resetAt: string
}
authType: string // 'api' or 'manual'
}
usage: {
currentPeriodCost: number
limit: number
plan: string // e.g., 'free', 'pro'
}
}
```
##### setApiKey()
@@ -167,7 +261,7 @@ client.setApiKey('new-api-key');
client.setBaseUrl('https://my-custom-domain.com');
```
## 型
## 型定義
### WorkflowExecutionResult
@@ -187,6 +281,20 @@ interface WorkflowExecutionResult {
}
```
### AsyncExecutionResult
```typescript
interface AsyncExecutionResult {
success: boolean;
taskId: string;
status: 'queued';
createdAt: string;
links: {
status: string; // e.g., "/api/jobs/{taskId}"
};
}
```
### WorkflowStatus
```typescript
@@ -198,6 +306,45 @@ interface WorkflowStatus {
}
```
### RateLimitInfo
```typescript
interface RateLimitInfo {
limit: number;
remaining: number;
reset: number;
retryAfter?: number;
}
```
### UsageLimits
```typescript
interface UsageLimits {
success: boolean;
rateLimit: {
sync: {
isLimited: boolean;
limit: number;
remaining: number;
resetAt: string;
};
async: {
isLimited: boolean;
limit: number;
remaining: number;
resetAt: string;
};
authType: string;
};
usage: {
currentPeriodCost: number;
limit: number;
plan: string;
};
}
```
### SimStudioError
```typescript
@@ -207,9 +354,16 @@ class SimStudioError extends Error {
}
```
**一般的なエラーコード:**
- `UNAUTHORIZED`: 無効なAPIキー
- `TIMEOUT`: リクエストがタイムアウトしました
- `RATE_LIMIT_EXCEEDED`: レート制限を超えました
- `USAGE_LIMIT_EXCEEDED`: 使用制限を超えました
- `EXECUTION_ERROR`: ワークフローの実行に失敗しました
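One way to branch on these codes when calling the SDK is sketched below. This is not prescribed usage; the handling (and the `runSafely` helper name) is hypothetical and should be adapted to your application:

```typescript
import { SimStudioClient, SimStudioError } from 'simstudio-ts-sdk';

// Sketch: map the documented error codes to application-level handling.
async function runSafely(client: SimStudioClient, workflowId: string) {
  try {
    return await client.executeWorkflow(workflowId, { input: {} });
  } catch (error) {
    if (error instanceof SimStudioError) {
      if (error.code === 'UNAUTHORIZED') {
        throw new Error('Check the API key configured on the client.');
      }
      if (error.code === 'RATE_LIMIT_EXCEEDED') {
        // Fall back to the built-in retry helper documented above.
        return client.executeWithRetry(workflowId, { input: {} });
      }
      // TIMEOUT, USAGE_LIMIT_EXCEEDED, EXECUTION_ERROR: surface to the caller.
    }
    throw error;
  }
}
```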
## 例
### 基本的なワークフロー実行
### 基本的なワークフロー実行
<Steps>
<Step title="クライアントの初期化">
@@ -230,7 +384,7 @@ class SimStudioError extends Error {
import { SimStudioClient } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
apiKey: process.env.SIMSTUDIO_API_KEY!
apiKey: process.env.SIM_API_KEY!
});
async function runWorkflow() {
@@ -271,7 +425,7 @@ runWorkflow();
import { SimStudioClient, SimStudioError } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
apiKey: process.env.SIMSTUDIO_API_KEY!
apiKey: process.env.SIM_API_KEY!
});
async function executeWithErrorHandling() {
@@ -315,14 +469,14 @@ async function executeWithErrorHandling() {
import { SimStudioClient } from 'simstudio-ts-sdk';
// Development configuration
const apiKey = process.env.SIMSTUDIO_API_KEY;
const apiKey = process.env.SIM_API_KEY;
if (!apiKey) {
throw new Error('SIMSTUDIO_API_KEY environment variable is required');
throw new Error('SIM_API_KEY environment variable is required');
}
const client = new SimStudioClient({
apiKey,
baseUrl: process.env.SIMSTUDIO_BASE_URL // optional
baseUrl: process.env.SIM_BASE_URL // optional
});
```
@@ -333,21 +487,21 @@ async function executeWithErrorHandling() {
import { SimStudioClient } from 'simstudio-ts-sdk';
// Production configuration with validation
const apiKey = process.env.SIMSTUDIO_API_KEY;
const apiKey = process.env.SIM_API_KEY;
if (!apiKey) {
throw new Error('SIMSTUDIO_API_KEY environment variable is required');
throw new Error('SIM_API_KEY environment variable is required');
}
const client = new SimStudioClient({
apiKey,
baseUrl: process.env.SIMSTUDIO_BASE_URL || 'https://sim.ai'
baseUrl: process.env.SIM_BASE_URL || 'https://sim.ai'
});
```
</Tab>
</Tabs>
### Node.js Expressとの統合
### Node.js Express統合
Express.jsサーバーとの統合
@@ -357,7 +511,7 @@ import { SimStudioClient } from 'simstudio-ts-sdk';
const app = express();
const client = new SimStudioClient({
apiKey: process.env.SIMSTUDIO_API_KEY!
apiKey: process.env.SIM_API_KEY!
});
app.use(express.json());
@@ -399,7 +553,7 @@ import { NextApiRequest, NextApiResponse } from 'next';
import { SimStudioClient } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
apiKey: process.env.SIMSTUDIO_API_KEY!
apiKey: process.env.SIM_API_KEY!
});
export default async function handler(
@@ -430,7 +584,7 @@ export default async function handler(
### ブラウザでの使用
ブラウザで使用する場合適切なCORS設定が必要
ブラウザで使用適切なCORS設定が必要
```typescript
import { SimStudioClient } from 'simstudio-ts-sdk';
@@ -464,7 +618,7 @@ document.getElementById('executeBtn')?.addEventListener('click', executeClientSi
```
<Callout type="warning">
ブラウザでSDKを使用する際は、機密性の高いAPIキーを公開しないよう注意してください。バックエンドプロキシや権限が制限された公開APIキーの使用を検討してください。
ブラウザでSDKを使用する場合、機密性の高いAPIキーを公開しないよう注意してください。バックエンドプロキシや権限が制限された公開APIキーの使用を検討してください。
</Callout>
### Reactフックの例
@@ -476,7 +630,7 @@ import { useState, useCallback } from 'react';
import { SimStudioClient, WorkflowExecutionResult } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
apiKey: process.env.NEXT_PUBLIC_SIMSTUDIO_API_KEY!
apiKey: process.env.SIM_API_KEY!
});
interface UseWorkflowResult {
@@ -532,7 +686,7 @@ function WorkflowComponent() {
<button onClick={handleExecute} disabled={loading}>
{loading ? 'Executing...' : 'Execute Workflow'}
</button>
{error && <div>Error: {error.message}</div>}
{result && (
<div>
@@ -545,38 +699,267 @@ function WorkflowComponent() {
}
```
## APIキーの取得方法
### 非同期ワークフロー実行
長時間実行タスク向けに非同期でワークフローを実行:
```typescript
import { SimStudioClient, AsyncExecutionResult } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
apiKey: process.env.SIM_API_KEY!
});
async function executeAsync() {
try {
// Start async execution
const result = await client.executeWorkflow('workflow-id', {
input: { data: 'large dataset' },
async: true // Execute asynchronously
});
// Check if result is an async execution
if ('taskId' in result) {
console.log('Task ID:', result.taskId);
console.log('Status endpoint:', result.links.status);
// Poll for completion
let status = await client.getJobStatus(result.taskId);
while (status.status === 'queued' || status.status === 'processing') {
console.log('Current status:', status.status);
await new Promise(resolve => setTimeout(resolve, 2000)); // Wait 2 seconds
status = await client.getJobStatus(result.taskId);
}
if (status.status === 'completed') {
console.log('Workflow completed!');
console.log('Output:', status.output);
console.log('Duration:', status.metadata.duration);
} else {
console.error('Workflow failed:', status.error);
}
}
} catch (error) {
console.error('Error:', error);
}
}
executeAsync();
```
### レート制限とリトライ
指数バックオフによるレート制限の自動処理:
```typescript
import { SimStudioClient, SimStudioError } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
apiKey: process.env.SIM_API_KEY!
});
async function executeWithRetryHandling() {
try {
// Automatically retries on rate limit
const result = await client.executeWithRetry('workflow-id', {
input: { message: 'Process this' }
}, {
maxRetries: 5,
initialDelay: 1000,
maxDelay: 60000,
backoffMultiplier: 2
});
console.log('Success:', result);
} catch (error) {
if (error instanceof SimStudioError && error.code === 'RATE_LIMIT_EXCEEDED') {
console.error('Rate limit exceeded after all retries');
// Check rate limit info
const rateLimitInfo = client.getRateLimitInfo();
if (rateLimitInfo) {
console.log('Rate limit resets at:', new Date(rateLimitInfo.reset * 1000));
}
}
}
}
```
### 使用状況モニタリング
アカウントの使用状況と制限のモニタリング:
```typescript
import { SimStudioClient } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
apiKey: process.env.SIM_API_KEY!
});
async function checkUsage() {
try {
const limits = await client.getUsageLimits();
console.log('=== Rate Limits ===');
console.log('Sync requests:');
console.log(' Limit:', limits.rateLimit.sync.limit);
console.log(' Remaining:', limits.rateLimit.sync.remaining);
console.log(' Resets at:', limits.rateLimit.sync.resetAt);
console.log(' Is limited:', limits.rateLimit.sync.isLimited);
console.log('\nAsync requests:');
console.log(' Limit:', limits.rateLimit.async.limit);
console.log(' Remaining:', limits.rateLimit.async.remaining);
console.log(' Resets at:', limits.rateLimit.async.resetAt);
console.log(' Is limited:', limits.rateLimit.async.isLimited);
console.log('\n=== Usage ===');
    console.log('Current period cost:', limits.usage.currentPeriodCost);
    console.log('Limit:', limits.usage.limit);
    console.log('Plan:', limits.usage.plan);
  } catch (error) {
    console.error('Error checking usage:', error);
  }
}
checkUsage();
```
### Streaming Workflow Execution
Execute workflows with real-time streaming responses:
```typescript
import { SimStudioClient } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
apiKey: process.env.SIM_API_KEY!
});
async function executeWithStreaming() {
try {
// 特定のブロック出力のストリーミングを有効化
const result = await client.executeWorkflow('workflow-id', {
input: { message: 'Count to five' },
stream: true,
selectedOutputs: ['agent1.content'] // blockName.attribute形式を使用
});
console.log('ワークフロー結果:', result);
} catch (error) {
console.error('エラー:', error);
}
}
```
The streaming response follows the Server-Sent Events (SSE) format:
```
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":"One"}
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", two"}
data: {"event":"done","success":true,"output":{},"metadata":{"duration":610}}
data: [DONE]
```
**React Streaming Example:**
```typescript
import { useState, useEffect } from 'react';
function StreamingWorkflow() {
const [output, setOutput] = useState('');
const [loading, setLoading] = useState(false);
const executeStreaming = async () => {
setLoading(true);
setOutput('');
// IMPORTANT: Make this API call from your backend server, not the browser
// Never expose your API key in client-side code
const response = await fetch('https://sim.ai/api/workflows/WORKFLOW_ID/execute', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'X-API-Key': process.env.SIM_API_KEY! // Server-side environment variable only
},
body: JSON.stringify({
message: 'Generate a story',
stream: true,
selectedOutputs: ['agent1.content']
})
});
const reader = response.body?.getReader();
const decoder = new TextDecoder();
while (reader) {
const { done, value } = await reader.read();
if (done) break;
const chunk = decoder.decode(value);
const lines = chunk.split('\n\n');
for (const line of lines) {
if (line.startsWith('data: ')) {
const data = line.slice(6);
if (data === '[DONE]') {
setLoading(false);
break;
}
try {
const parsed = JSON.parse(data);
if (parsed.chunk) {
setOutput(prev => prev + parsed.chunk);
} else if (parsed.event === 'done') {
console.log('Execution complete:', parsed.metadata);
}
} catch (e) {
// Skip invalid JSON
}
}
}
}
};
return (
<div>
<button onClick={executeStreaming} disabled={loading}>
{loading ? 'Generating...' : 'Start Streaming'}
</button>
<div style={{ whiteSpace: 'pre-wrap' }}>{output}</div>
</div>
);
}
```
## Getting Your API Key
<Steps>
<Step title="Simにログイン">
[Sim](https://sim.ai)に移動してアカウントにログインします。
<Step title="Log in to Sim">
Navigate to [Sim](https://sim.ai) and log in to your account.
</Step>
<Step title="ワークフローを開く">
プログラムで実行したいワークフローに移動します。
<Step title="Open your workflow">
Navigate to the workflow you want to execute programmatically.
</Step>
<Step title="ワークフローをデプロイ">
まだデプロイされていない場合は、「デプロイ」をクリックしてワークフローをデプロイします。
<Step title="Deploy your workflow">
Click on "Deploy" to deploy your workflow if it hasn't been deployed yet.
</Step>
<Step title="APIキーを作成または選択">
デプロイ処理中に、APIキーを選択または作成します。
<Step title="Create or select an API key">
During the deployment process, select or create an API key.
</Step>
<Step title="APIキーをコピー">
TypeScript/JavaScriptアプリケーションで使用するAPIキーをコピーします。
<Step title="Copy the API key">
Copy the API key to use in your TypeScript/JavaScript application.
</Step>
</Steps>
<Callout type="warning">
APIキーは安全に保管し、バージョン管理システムにコミットしないでください。環境変数や安全な設定管理を使用してください。
Keep your API key secure and never commit it to version control. Use environment variables or secure configuration management.
</Callout>
## 要件
## Requirements
- Node.js 16以上
- TypeScript 5.0以上(TypeScriptプロジェクトの場合)
- Node.js 16+
- TypeScript 5.0+ (for TypeScript projects)
## TypeScriptサポート
## TypeScript Support
このSDKはTypeScriptで書かれており、完全な型安全性を提供します
The SDK is written in TypeScript and provides full type safety:
```typescript
import {
@@ -588,7 +971,7 @@ import {
// Type-safe client initialization
const client: SimStudioClient = new SimStudioClient({
apiKey: process.env.SIMSTUDIO_API_KEY!
apiKey: process.env.SIM_API_KEY!
});
// Type-safe workflow execution
@@ -602,6 +985,7 @@ const result: WorkflowExecutionResult = await client.executeWorkflow('workflow-i
const status: WorkflowStatus = await client.getWorkflowStatus('workflow-id');
```
## ライセンス
## License
Apache-2.0
Apache-2.0

View File

@@ -38,6 +38,84 @@ curl -X POST \
成功したレスポンスはエグゼキュータからシリアル化された実行結果を返します。エラーは検証、認証、またはワークフローの失敗を表示します。
## ストリーミングレスポンス
リアルタイムストリーミングを有効にすると、ワークフローの出力が生成されるたびに文字単位で受信できます。これはAIの応答をユーザーに段階的に表示するのに役立ちます。
### リクエストパラメータ
ストリーミングを有効にするには、これらのパラメータを追加してください:
- `stream` - Server-Sent Events (SSE)ストリーミングを有効にするには `true` に設定します
- `selectedOutputs` - ストリーミングするブロック出力の配列(例:`["agent1.content"]`
### ブロック出力フォーマット
`blockName.attribute` フォーマットを使用して、ストリーミングするブロック出力を指定します:
- フォーマット:`"blockName.attribute"`Agent 1ブロックの内容をストリーミングしたい場合は、`"agent1.content"` を使用します)
- ブロック名は大文字小文字を区別せず、スペースは無視されます
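Assuming the normalization implied by these rules (lowercase, spaces removed), a block's display title maps to its reference as in this hypothetical helper:

```typescript
// Assumed normalization: lowercase the block title and strip spaces.
// e.g. a block titled "Agent 1" is referenced as "agent1.content" in selectedOutputs.
const toOutputRef = (blockTitle: string, attribute: string) =>
  `${blockTitle.toLowerCase().replace(/\s+/g, '')}.${attribute}`;

toOutputRef('Agent 1', 'content'); // => "agent1.content"
```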
### リクエスト例
```bash
curl -X POST \
https://sim.ai/api/workflows/WORKFLOW_ID/execute \
-H 'Content-Type: application/json' \
-H 'X-API-Key: YOUR_KEY' \
-d '{
"message": "Count to five",
"stream": true,
"selectedOutputs": ["agent1.content"]
}'
```
### レスポンスフォーマット
ストリーミングレスポンスはServer-Sent Events (SSE)フォーマットを使用します:
```
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":"One"}
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", two"}
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", three"}
data: {"event":"done","success":true,"output":{},"metadata":{"duration":610}}
data: [DONE]
```
各イベントには以下が含まれます:
- **ストリーミングチャンク**`{"blockId": "...", "chunk": "text"}` - 生成されるリアルタイムテキスト
- **最終イベント**`{"event": "done", ...}` - 実行メタデータと完全な結果
- **ターミネーター**`[DONE]` - ストリーム終了を示す信号
### 複数ブロックのストリーミング
`selectedOutputs` に複数のブロックが含まれる場合、各チャンクはどのブロックから生成されたかを示します:
```bash
curl -X POST \
https://sim.ai/api/workflows/WORKFLOW_ID/execute \
-H 'Content-Type: application/json' \
-H 'X-API-Key: YOUR_KEY' \
-d '{
"message": "Process this request",
"stream": true,
"selectedOutputs": ["agent1.content", "agent2.content"]
}'
```
各チャンクの `blockId` フィールドを使用して、出力を正しいUI要素にルーティングできます
```
data: {"blockId":"agent1-uuid","chunk":"Processing..."}
data: {"blockId":"agent2-uuid","chunk":"Analyzing..."}
data: {"blockId":"agent1-uuid","chunk":" complete"}
```
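A sketch of consuming such a stream server-side and routing chunks by `blockId` (the endpoint and SSE framing are as shown above; the `handlers` map is hypothetical application code):

```typescript
// Route streamed chunks to per-block handlers; call with the fetch Response
// from the execute endpoint (make the request server-side to keep the API key private).
async function routeStream(
  response: Response,
  handlers: Record<string, (chunk: string) => void>
) {
  const reader = response.body!.getReader();
  const decoder = new TextDecoder();
  let buffer = '';
  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    buffer += decoder.decode(value, { stream: true });
    const events = buffer.split('\n\n');
    buffer = events.pop() ?? ''; // keep any partial event for the next read
    for (const event of events) {
      if (!event.startsWith('data: ')) continue;
      const data = event.slice(6);
      if (data === '[DONE]') return;
      const parsed = JSON.parse(data);
      if (parsed.blockId && parsed.chunk) handlers[parsed.blockId]?.(parsed.chunk);
    }
  }
}
```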
## 出力リファレンス
| リファレンス | 説明 |
@@ -45,7 +123,7 @@ curl -X POST \
| `<api.field>` | 入力フォーマットで定義されたフィールド |
| `<api.input>` | 構造化されたリクエスト本文全体 |
入力フォーマットが定義されていない場合、エグゼキュータは生のJSONを `<api.input>` のみ公開します。
入力フォーマットが定義されていない場合、エグゼキュータは `<api.input>` のみ生のJSONを公開します。
<Callout type="warning">
ワークフローには1つのAPIトリガーのみ含めることができます。変更後は新しいデプロイメントを公開して、エンドポイントを最新の状態に保ってください。

View File

@@ -10,7 +10,7 @@ import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
官方的 Python SDK 允许您通过 Python 应用程序以编程方式执行工作流。
<Callout type="info">
Python SDK 支持 Python 3.8+并提供同步工作流执行。目前所有工作流执行均为同步模式
Python SDK 支持 Python 3.8+具备异步执行支持、自动速率限制(带指数退避)以及使用情况跟踪功能
</Callout>
## 安装
@@ -72,10 +72,15 @@ result = client.execute_workflow(
**参数:**
- `workflow_id` (str): 要执行的工作流 ID
- `input_data` (dict, 可选): 传递给工作流的输入数据
- `timeout` (float, 可选): 超时时间以秒为单位默认值30.0
- `input_data` (dict, optional): 传递给工作流的输入数据
- `timeout` (float, optional): 超时时间以秒为单位默认值30.0
- `stream` (bool, optional): 启用流式响应默认值False
- `selected_outputs` (list[str], optional): 要流式传输的块输出,使用 `blockName.attribute` 格式(例如,`["agent1.content"]`
- `async_execution` (bool, optional): 异步执行默认值False
**返回值:** `WorkflowExecutionResult`
**返回值:** `WorkflowExecutionResult | AsyncExecutionResult`
当 `async_execution=True` 时,立即返回任务 ID 以供轮询。否则,等待完成。
##### get_workflow_status()
@@ -103,32 +108,122 @@ if is_ready:
```
**参数:**
- `workflow_id` (str)工作流的 ID
- `workflow_id` (str): 工作流的 ID
**返回值:** `bool`
##### execute_workflow_sync()
##### get_job_status()
<Callout type="info">
当前,此方法与 `execute_workflow()` 相同,因为所有执行都是同步的。提供此方法是为了在将来添加异步执行时保持兼容性。
</Callout>
执行工作流(当前为同步,与 `execute_workflow()` 相同)。
获取异步任务执行的状态。
```python
result = client.execute_workflow_sync(
status = client.get_job_status("task-id-from-async-execution")
print("Status:", status["status"]) # 'queued', 'processing', 'completed', 'failed'
if status["status"] == "completed":
print("Output:", status["output"])
```
**参数:**
- `task_id` (str): 异步执行返回的任务 ID
**返回值:** `Dict[str, Any]`
**响应字段:**
- `success` (bool): 请求是否成功
- `taskId` (str): 任务 ID
- `status` (str): 可能的值包括 `'queued'`, `'processing'`, `'completed'`, `'failed'`, `'cancelled'`
- `metadata` (dict): 包含 `startedAt`, `completedAt` 和 `duration`
- `output` (any, optional): 工作流输出(完成时)
- `error` (any, optional): 错误详情(失败时)
- `estimatedDuration` (int, optional): 估计持续时间(以毫秒为单位,处理中/排队时)
##### execute_with_retry()
使用指数退避在速率限制错误上自动重试执行工作流。
```python
result = client.execute_with_retry(
"workflow-id",
input_data={"data": "some input"},
timeout=60.0
input_data={"message": "Hello"},
timeout=30.0,
max_retries=3, # Maximum number of retries
initial_delay=1.0, # Initial delay in seconds
max_delay=30.0, # Maximum delay in seconds
backoff_multiplier=2.0 # Exponential backoff multiplier
)
```
**参数:**
- `workflow_id` (str)要执行的工作流 ID
- `input_data` (dict, optional)传递给工作流的输入数据
- `timeout` (float):初始请求的超时时间(以秒为单位)
- `workflow_id` (str): 要执行的工作流 ID
- `input_data` (dict, optional): 传递给工作流的输入数据
- `timeout` (float, optional): 超时时间(以秒为单位)
- `stream` (bool, optional): 启用流式响应
- `selected_outputs` (list, optional): 要流式传输的块输出
- `async_execution` (bool, optional): 异步执行
- `max_retries` (int, optional): 最大重试次数默认值3
- `initial_delay` (float, optional): 初始延迟时间以秒为单位默认值1.0
- `max_delay` (float, optional): 最大延迟时间以秒为单位默认值30.0
- `backoff_multiplier` (float, optional): 退避倍数默认值2.0
**返回值:** `WorkflowExecutionResult`
**返回值:** `WorkflowExecutionResult | AsyncExecutionResult`
重试逻辑使用指数退避1 秒 → 2 秒 → 4 秒 → 8 秒...),并带有 ±25% 的抖动以防止惊群效应。如果 API 提供了 `retry-after` 标头,则会使用该标头。
##### get_rate_limit_info()
从上一次 API 响应中获取当前的速率限制信息。
```python
rate_limit_info = client.get_rate_limit_info()
if rate_limit_info:
print("Limit:", rate_limit_info.limit)
print("Remaining:", rate_limit_info.remaining)
print("Reset:", datetime.fromtimestamp(rate_limit_info.reset))
```
**返回值:** `RateLimitInfo | None`
##### get_usage_limits()
获取您的账户当前的使用限制和配额信息。
```python
limits = client.get_usage_limits()
print("Sync requests remaining:", limits.rate_limit["sync"]["remaining"])
print("Async requests remaining:", limits.rate_limit["async"]["remaining"])
print("Current period cost:", limits.usage["currentPeriodCost"])
print("Plan:", limits.usage["plan"])
```
**返回值:** `UsageLimits`
**响应结构:**
```python
{
"success": bool,
"rateLimit": {
"sync": {
"isLimited": bool,
"limit": int,
"remaining": int,
"resetAt": str
},
"async": {
"isLimited": bool,
"limit": int,
"remaining": int,
"resetAt": str
},
"authType": str # 'api' or 'manual'
},
"usage": {
"currentPeriodCost": float,
"limit": float,
"plan": str # e.g., 'free', 'pro'
}
}
```
##### set_api_key()
@@ -170,6 +265,18 @@ class WorkflowExecutionResult:
total_duration: Optional[float] = None
```
### AsyncExecutionResult
```python
@dataclass
class AsyncExecutionResult:
success: bool
task_id: str
status: str # 'queued'
created_at: str
links: Dict[str, str] # e.g., {"status": "/api/jobs/{taskId}"}
```
### WorkflowStatus
```python
@@ -181,6 +288,27 @@ class WorkflowStatus:
needs_redeployment: bool = False
```
### RateLimitInfo
```python
@dataclass
class RateLimitInfo:
limit: int
remaining: int
reset: int
retry_after: Optional[int] = None
```
### UsageLimits
```python
@dataclass
class UsageLimits:
success: bool
rate_limit: Dict[str, Any]
usage: Dict[str, Any]
```
### SimStudioError
```python
@@ -191,6 +319,13 @@ class SimStudioError(Exception):
self.status = status
```
**常见错误代码:**
- `UNAUTHORIZED`: 无效的 API 密钥
- `TIMEOUT`: 请求超时
- `RATE_LIMIT_EXCEEDED`: 超出速率限制
- `USAGE_LIMIT_EXCEEDED`: 超出使用限制
- `EXECUTION_ERROR`: 工作流执行失败
## 示例
### 基本工作流执行
@@ -214,7 +349,7 @@ class SimStudioError(Exception):
import os
from simstudio import SimStudioClient
client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def run_workflow():
try:
@@ -252,7 +387,7 @@ run_workflow()
from simstudio import SimStudioClient, SimStudioError
import os
client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def execute_with_error_handling():
try:
@@ -284,7 +419,7 @@ from simstudio import SimStudioClient
import os
# Using context manager to automatically close the session
with SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY")) as client:
with SimStudioClient(api_key=os.getenv("SIM_API_KEY")) as client:
result = client.execute_workflow("workflow-id")
print("Result:", result)
# Session is automatically closed here
@@ -298,7 +433,7 @@ with SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY")) as client:
from simstudio import SimStudioClient
import os
client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def execute_workflows_batch(workflow_data_pairs):
"""Execute multiple workflows with different input data."""
@@ -339,6 +474,230 @@ for result in results:
print(f"Workflow {result['workflow_id']}: {'Success' if result['success'] else 'Failed'}")
```
### 异步工作流执行
为长时间运行的任务异步执行工作流:
```python
import os
import time
from simstudio import SimStudioClient
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def execute_async():
try:
# Start async execution
result = client.execute_workflow(
"workflow-id",
input_data={"data": "large dataset"},
async_execution=True # Execute asynchronously
)
# Check if result is an async execution
if hasattr(result, 'task_id'):
print(f"Task ID: {result.task_id}")
print(f"Status endpoint: {result.links['status']}")
# Poll for completion
status = client.get_job_status(result.task_id)
while status["status"] in ["queued", "processing"]:
print(f"Current status: {status['status']}")
time.sleep(2) # Wait 2 seconds
status = client.get_job_status(result.task_id)
if status["status"] == "completed":
print("Workflow completed!")
print(f"Output: {status['output']}")
print(f"Duration: {status['metadata']['duration']}")
else:
print(f"Workflow failed: {status['error']}")
except Exception as error:
print(f"Error: {error}")
execute_async()
```
### 速率限制与重试
通过指数退避自动处理速率限制:
```python
import os
from simstudio import SimStudioClient, SimStudioError
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def execute_with_retry_handling():
try:
# Automatically retries on rate limit
result = client.execute_with_retry(
"workflow-id",
input_data={"message": "Process this"},
max_retries=5,
initial_delay=1.0,
max_delay=60.0,
backoff_multiplier=2.0
)
print(f"Success: {result}")
except SimStudioError as error:
if error.code == "RATE_LIMIT_EXCEEDED":
print("Rate limit exceeded after all retries")
# Check rate limit info
rate_limit_info = client.get_rate_limit_info()
if rate_limit_info:
from datetime import datetime
reset_time = datetime.fromtimestamp(rate_limit_info.reset)
print(f"Rate limit resets at: {reset_time}")
execute_with_retry_handling()
```
### 使用监控
监控您的账户使用情况和限制:
```python
import os
from simstudio import SimStudioClient
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def check_usage():
try:
limits = client.get_usage_limits()
print("=== Rate Limits ===")
print("Sync requests:")
print(f" Limit: {limits.rate_limit['sync']['limit']}")
print(f" Remaining: {limits.rate_limit['sync']['remaining']}")
print(f" Resets at: {limits.rate_limit['sync']['resetAt']}")
print(f" Is limited: {limits.rate_limit['sync']['isLimited']}")
print("\nAsync requests:")
print(f" Limit: {limits.rate_limit['async']['limit']}")
print(f" Remaining: {limits.rate_limit['async']['remaining']}")
print(f" Resets at: {limits.rate_limit['async']['resetAt']}")
print(f" Is limited: {limits.rate_limit['async']['isLimited']}")
print("\n=== Usage ===")
print(f"Current period cost: ${limits.usage['currentPeriodCost']:.2f}")
print(f"Limit: ${limits.usage['limit']:.2f}")
print(f"Plan: {limits.usage['plan']}")
percent_used = (limits.usage['currentPeriodCost'] / limits.usage['limit']) * 100
print(f"Usage: {percent_used:.1f}%")
if percent_used > 80:
print("⚠️ Warning: You are approaching your usage limit!")
except Exception as error:
print(f"Error checking usage: {error}")
check_usage()
```
### 流式工作流执行
通过实时流式响应执行工作流:
```python
from simstudio import SimStudioClient
import os
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def execute_with_streaming():
"""Execute workflow with streaming enabled."""
try:
# Enable streaming for specific block outputs
result = client.execute_workflow(
"workflow-id",
input_data={"message": "Count to five"},
stream=True,
selected_outputs=["agent1.content"] # Use blockName.attribute format
)
print("Workflow result:", result)
except Exception as error:
print("Error:", error)
execute_with_streaming()
```
流式响应遵循服务器发送事件 (SSE) 格式:
```
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":"One"}
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", two"}
data: {"event":"done","success":true,"output":{},"metadata":{"duration":610}}
data: [DONE]
```
**Flask 流式示例:**
```python
from flask import Flask, Response, stream_with_context
import requests
import json
import os
app = Flask(__name__)
@app.route('/stream-workflow')
def stream_workflow():
"""Stream workflow execution to the client."""
def generate():
response = requests.post(
'https://sim.ai/api/workflows/WORKFLOW_ID/execute',
headers={
'Content-Type': 'application/json',
'X-API-Key': os.getenv('SIM_API_KEY')
},
json={
'message': 'Generate a story',
'stream': True,
'selectedOutputs': ['agent1.content']
},
stream=True
)
for line in response.iter_lines():
if line:
decoded_line = line.decode('utf-8')
if decoded_line.startswith('data: '):
data = decoded_line[6:] # Remove 'data: ' prefix
if data == '[DONE]':
break
try:
parsed = json.loads(data)
if 'chunk' in parsed:
yield f"data: {json.dumps(parsed)}\n\n"
elif parsed.get('event') == 'done':
yield f"data: {json.dumps(parsed)}\n\n"
print("Execution complete:", parsed.get('metadata'))
except json.JSONDecodeError:
pass
return Response(
stream_with_context(generate()),
mimetype='text/event-stream'
)
if __name__ == '__main__':
app.run(debug=True)
```
### 环境配置
使用环境变量配置客户端:
@@ -352,8 +711,8 @@ for result in results:
# Development configuration
client = SimStudioClient(
api_key=os.getenv("SIMSTUDIO_API_KEY"),
base_url=os.getenv("SIMSTUDIO_BASE_URL", "https://sim.ai")
api_key=os.getenv("SIM_API_KEY")
base_url=os.getenv("SIM_BASE_URL", "https://sim.ai")
)
```
@@ -365,13 +724,13 @@ for result in results:
from simstudio import SimStudioClient
# Production configuration with error handling
api_key = os.getenv("SIMSTUDIO_API_KEY")
api_key = os.getenv("SIM_API_KEY")
if not api_key:
raise ValueError("SIMSTUDIO_API_KEY environment variable is required")
raise ValueError("SIM_API_KEY environment variable is required")
client = SimStudioClient(
api_key=api_key,
base_url=os.getenv("SIMSTUDIO_BASE_URL", "https://sim.ai")
base_url=os.getenv("SIM_BASE_URL", "https://sim.ai")
)
```
@@ -382,15 +741,15 @@ for result in results:
<Steps>
<Step title="登录 Sim">
访问 [Sim](https://sim.ai) 并登录您的账户。
前往 [Sim](https://sim.ai) 并登录您的账户。
</Step>
<Step title="打开您的工作流">
导航到您想要以编程方式执行的工作流。
前往您想要以编程方式执行的工作流。
</Step>
<Step title="部署您的工作流">
点击“部署”以部署您的工作流(如果尚未部署)
如果尚未部署,请点击“部署”以部署您的工作流。
</Step>
<Step title="创建或选择 API 密钥">
<Step title="创建或选择一个 API 密钥">
在部署过程中,选择或创建一个 API 密钥。
</Step>
<Step title="复制 API 密钥">

View File

@@ -7,10 +7,10 @@ import { Card, Cards } from 'fumadocs-ui/components/card'
import { Step, Steps } from 'fumadocs-ui/components/steps'
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
Sim 的官方 TypeScript/JavaScript SDK 提供完整的类型安全,支持 Node.js 和浏览器环境,允许您从 Node.js 应用程序、Web 应用程序和其他 JavaScript 环境中以编程方式执行工作流。目前,所有工作流执行均为同步。
Sim 的官方 TypeScript/JavaScript SDK 提供完整的类型安全,支持 Node.js 和浏览器环境,允许您从 Node.js 应用程序、Web 应用程序和其他 JavaScript 环境中以编程方式执行工作流。
<Callout type="info">
TypeScript SDK 提供完整的类型安全,支持 Node.js 和浏览器环境。目前,所有工作流执行均为同步
TypeScript SDK 提供完整的类型安全、异步执行支持、带有指数回退的自动速率限制以及使用跟踪
</Callout>
## 安装
@@ -91,12 +91,17 @@ const result = await client.executeWorkflow('workflow-id', {
```
**参数:**
- `workflowId`字符串要执行的工作流的 ID
- `options`ExecutionOptions可选
- `input`任意类型传递给工作流的输入数据
- `timeout`(数字):超时时间以毫秒为单位默认值30000
- `workflowId` (字符串): 要执行的工作流的 ID
- `options` (ExecutionOptions可选):
- `input` (任意类型): 传递给工作流的输入数据
- `timeout` (数字): 超时时间以毫秒为单位默认值30000
- `stream` (布尔值): 启用流式响应默认值false
- `selectedOutputs` (字符串数组): 要流式传输的块输出,使用 `blockName.attribute` 格式(例如,`["agent1.content"]`
- `async` (布尔值): 异步执行默认值false
**返回值:** `Promise<WorkflowExecutionResult>`
**返回值:** `Promise<WorkflowExecutionResult | AsyncExecutionResult>`
当 `async: true` 时,立即返回一个用于轮询的任务 ID。否则等待完成。
##### getWorkflowStatus()
@@ -108,7 +113,7 @@ console.log('Is deployed:', status.isDeployed);
```
**参数:**
- `workflowId`字符串工作流的 ID
- `workflowId` (字符串): 工作流的 ID
**返回值:** `Promise<WorkflowStatus>`
@@ -124,32 +129,121 @@ if (isReady) {
```
**参数:**
- `workflowId`字符串工作流的 ID
- `workflowId` (字符串): 工作流的 ID
**返回值:** `Promise<boolean>`
##### executeWorkflowSync()
##### getJobStatus()
<Callout type="info">
当前,此方法与 `executeWorkflow()` 相同,因为所有执行都是同步的。提供此方法是为了在将来添加异步执行时保持兼容性。
</Callout>
执行工作流(当前为同步,与 `executeWorkflow()` 相同)。
获取异步任务执行的状态。
```typescript
const result = await client.executeWorkflowSync('workflow-id', {
input: { data: 'some input' },
timeout: 60000
const status = await client.getJobStatus('task-id-from-async-execution');
console.log('Status:', status.status); // 'queued', 'processing', 'completed', 'failed'
if (status.status === 'completed') {
console.log('Output:', status.output);
}
```
**参数:**
- `taskId` (字符串): 异步执行返回的任务 ID
**返回值:** `Promise<JobStatus>`
**响应字段:**
- `success` (布尔值): 请求是否成功
- `taskId` (字符串): 任务 ID
- `status` (字符串): 可能的值包括 `'queued'`, `'processing'`, `'completed'`, `'failed'`, `'cancelled'`
- `metadata` (对象): 包含 `startedAt`, `completedAt` 和 `duration`
- `output` (任意类型,可选): 工作流输出(完成时)
- `error` (任意类型,可选): 错误详情(失败时)
- `estimatedDuration` (数字,可选): 估计持续时间(以毫秒为单位,处理中/排队时)
##### executeWithRetry()
使用指数退避机制,在遇到速率限制错误时自动重试执行工作流。
```typescript
const result = await client.executeWithRetry('workflow-id', {
input: { message: 'Hello' },
timeout: 30000
}, {
maxRetries: 3, // Maximum number of retries
initialDelay: 1000, // Initial delay in ms (1 second)
maxDelay: 30000, // Maximum delay in ms (30 seconds)
backoffMultiplier: 2 // Exponential backoff multiplier
});
```
**参数:**
- `workflowId`(字符串):要执行的工作流的 ID
- `options`ExecutionOptions可选
- `input`(任意类型):传递给工作流的输入数据
- `timeout`(数字):初始请求的超时时间(以毫秒为单位
- `options`ExecutionOptions可选与 `executeWorkflow()` 相同
- `retryOptions`RetryOptions可选
- `maxRetries`数字最大重试次数默认值3
- `initialDelay`数字初始延迟时间以毫秒为单位默认值1000
- `maxDelay`数字最大延迟时间以毫秒为单位默认值30000
- `backoffMultiplier`数字退避倍数默认值2
**返回值:** `Promise<WorkflowExecutionResult>`
**返回值:** `Promise<WorkflowExecutionResult | AsyncExecutionResult>`
重试逻辑使用指数退避1秒 → 2秒 → 4秒 → 8秒...),并带有 ±25% 的抖动以防止蜂拥效应。如果 API 提供了 `retry-after` 头,则会使用该头。
##### getRateLimitInfo()
从上一次 API 响应中获取当前速率限制信息。
```typescript
const rateLimitInfo = client.getRateLimitInfo();
if (rateLimitInfo) {
console.log('Limit:', rateLimitInfo.limit);
console.log('Remaining:', rateLimitInfo.remaining);
console.log('Reset:', new Date(rateLimitInfo.reset * 1000));
}
```
**返回值:** `RateLimitInfo | null`
##### getUsageLimits()
获取您的账户当前的使用限制和配额信息。
```typescript
const limits = await client.getUsageLimits();
console.log('Sync requests remaining:', limits.rateLimit.sync.remaining);
console.log('Async requests remaining:', limits.rateLimit.async.remaining);
console.log('Current period cost:', limits.usage.currentPeriodCost);
console.log('Plan:', limits.usage.plan);
```
**返回值:** `Promise<UsageLimits>`
**响应结构:**
```typescript
{
success: boolean
rateLimit: {
sync: {
isLimited: boolean
limit: number
remaining: number
resetAt: string
}
async: {
isLimited: boolean
limit: number
remaining: number
resetAt: string
}
authType: string // 'api' or 'manual'
}
usage: {
currentPeriodCost: number
limit: number
plan: string // e.g., 'free', 'pro'
}
}
```
##### setApiKey()
@@ -187,6 +281,20 @@ interface WorkflowExecutionResult {
}
```
### AsyncExecutionResult
```typescript
interface AsyncExecutionResult {
success: boolean;
taskId: string;
status: 'queued';
createdAt: string;
links: {
status: string; // e.g., "/api/jobs/{taskId}"
};
}
```
### WorkflowStatus
```typescript
@@ -198,6 +306,45 @@ interface WorkflowStatus {
}
```
### RateLimitInfo
```typescript
interface RateLimitInfo {
limit: number;
remaining: number;
reset: number;
retryAfter?: number;
}
```
### UsageLimits
```typescript
interface UsageLimits {
success: boolean;
rateLimit: {
sync: {
isLimited: boolean;
limit: number;
remaining: number;
resetAt: string;
};
async: {
isLimited: boolean;
limit: number;
remaining: number;
resetAt: string;
};
authType: string;
};
usage: {
currentPeriodCost: number;
limit: number;
plan: string;
};
}
```
### SimStudioError
```typescript
@@ -207,6 +354,13 @@ class SimStudioError extends Error {
}
```
**常见错误代码:**
- `UNAUTHORIZED`: 无效的 API 密钥
- `TIMEOUT`: 请求超时
- `RATE_LIMIT_EXCEEDED`: 超出速率限制
- `USAGE_LIMIT_EXCEEDED`: 超出使用限制
- `EXECUTION_ERROR`: 工作流执行失败
## 示例
### 基本工作流执行
@@ -230,7 +384,7 @@ class SimStudioError extends Error {
import { SimStudioClient } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
apiKey: process.env.SIMSTUDIO_API_KEY!
apiKey: process.env.SIM_API_KEY!
});
async function runWorkflow() {
@@ -271,7 +425,7 @@ runWorkflow();
import { SimStudioClient, SimStudioError } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
apiKey: process.env.SIMSTUDIO_API_KEY!
apiKey: process.env.SIM_API_KEY!
});
async function executeWithErrorHandling() {
@@ -315,14 +469,14 @@ async function executeWithErrorHandling() {
import { SimStudioClient } from 'simstudio-ts-sdk';
// Development configuration
const apiKey = process.env.SIMSTUDIO_API_KEY;
const apiKey = process.env.SIM_API_KEY;
if (!apiKey) {
throw new Error('SIMSTUDIO_API_KEY environment variable is required');
throw new Error('SIM_API_KEY environment variable is required');
}
const client = new SimStudioClient({
apiKey,
baseUrl: process.env.SIMSTUDIO_BASE_URL // optional
baseUrl: process.env.SIM_BASE_URL // optional
});
```
@@ -333,14 +487,14 @@ async function executeWithErrorHandling() {
import { SimStudioClient } from 'simstudio-ts-sdk';
// Production configuration with validation
const apiKey = process.env.SIMSTUDIO_API_KEY;
const apiKey = process.env.SIM_API_KEY;
if (!apiKey) {
throw new Error('SIMSTUDIO_API_KEY environment variable is required');
throw new Error('SIM_API_KEY environment variable is required');
}
const client = new SimStudioClient({
apiKey,
baseUrl: process.env.SIMSTUDIO_BASE_URL || 'https://sim.ai'
baseUrl: process.env.SIM_BASE_URL || 'https://sim.ai'
});
```
@@ -357,7 +511,7 @@ import { SimStudioClient } from 'simstudio-ts-sdk';
const app = express();
const client = new SimStudioClient({
apiKey: process.env.SIMSTUDIO_API_KEY!
apiKey: process.env.SIM_API_KEY!
});
app.use(express.json());
@@ -399,7 +553,7 @@ import { NextApiRequest, NextApiResponse } from 'next';
import { SimStudioClient } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
apiKey: process.env.SIMSTUDIO_API_KEY!
apiKey: process.env.SIM_API_KEY!
});
export default async function handler(
@@ -469,14 +623,14 @@ document.getElementById('executeBtn')?.addEventListener('click', executeClientSi
### React Hook 示例
为工作流执行创建一个自定义 React Hook
为工作流执行创建自定义 React hook
```typescript
import { useState, useCallback } from 'react';
import { SimStudioClient, WorkflowExecutionResult } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
apiKey: process.env.NEXT_PUBLIC_SIMSTUDIO_API_KEY!
apiKey: process.env.SIM_API_KEY!
});
interface UseWorkflowResult {
@@ -532,7 +686,7 @@ function WorkflowComponent() {
<button onClick={handleExecute} disabled={loading}>
{loading ? 'Executing...' : 'Execute Workflow'}
</button>
{error && <div>Error: {error.message}</div>}
{result && (
<div>
@@ -545,38 +699,267 @@ function WorkflowComponent() {
}
```
## 获取您的 API 密钥
### 异步工作流执行
为长时间运行的任务异步执行工作流:
```typescript
import { SimStudioClient, AsyncExecutionResult } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
apiKey: process.env.SIM_API_KEY!
});
async function executeAsync() {
try {
// Start async execution
const result = await client.executeWorkflow('workflow-id', {
input: { data: 'large dataset' },
async: true // Execute asynchronously
});
// Check if result is an async execution
if ('taskId' in result) {
console.log('Task ID:', result.taskId);
console.log('Status endpoint:', result.links.status);
// Poll for completion
let status = await client.getJobStatus(result.taskId);
while (status.status === 'queued' || status.status === 'processing') {
console.log('Current status:', status.status);
await new Promise(resolve => setTimeout(resolve, 2000)); // Wait 2 seconds
status = await client.getJobStatus(result.taskId);
}
if (status.status === 'completed') {
console.log('Workflow completed!');
console.log('Output:', status.output);
console.log('Duration:', status.metadata.duration);
} else {
console.error('Workflow failed:', status.error);
}
}
} catch (error) {
console.error('Error:', error);
}
}
executeAsync();
```
### 速率限制和重试
通过指数退避自动处理速率限制:
```typescript
import { SimStudioClient, SimStudioError } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
apiKey: process.env.SIM_API_KEY!
});
async function executeWithRetryHandling() {
try {
// Automatically retries on rate limit
const result = await client.executeWithRetry('workflow-id', {
input: { message: 'Process this' }
}, {
maxRetries: 5,
initialDelay: 1000,
maxDelay: 60000,
backoffMultiplier: 2
});
console.log('Success:', result);
} catch (error) {
if (error instanceof SimStudioError && error.code === 'RATE_LIMIT_EXCEEDED') {
console.error('Rate limit exceeded after all retries');
// Check rate limit info
const rateLimitInfo = client.getRateLimitInfo();
if (rateLimitInfo) {
console.log('Rate limit resets at:', new Date(rateLimitInfo.reset * 1000));
}
}
}
}
```
### 使用监控
监控您的账户使用情况和限制:
```typescript
import { SimStudioClient } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
apiKey: process.env.SIM_API_KEY!
});
async function checkUsage() {
try {
const limits = await client.getUsageLimits();
console.log('=== Rate Limits ===');
console.log('Sync requests:');
console.log(' Limit:', limits.rateLimit.sync.limit);
console.log(' Remaining:', limits.rateLimit.sync.remaining);
console.log(' Resets at:', limits.rateLimit.sync.resetAt);
console.log(' Is limited:', limits.rateLimit.sync.isLimited);
console.log('\nAsync requests:');
console.log(' Limit:', limits.rateLimit.async.limit);
console.log(' Remaining:', limits.rateLimit.async.remaining);
console.log(' Resets at:', limits.rateLimit.async.resetAt);
console.log(' Is limited:', limits.rateLimit.async.isLimited);
console.log('\n=== Usage ===');
    console.log('Current period cost:', limits.usage.currentPeriodCost);
    console.log('Limit:', limits.usage.limit);
    console.log('Plan:', limits.usage.plan);
  } catch (error) {
    console.error('Error checking usage:', error);
  }
}
checkUsage();
```
### Streaming Workflow Execution
Execute workflows with real-time streaming responses:
```typescript
import { SimStudioClient } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
apiKey: process.env.SIM_API_KEY!
});
async function executeWithStreaming() {
try {
// 为特定的块输出启用流式传输
const result = await client.executeWorkflow('workflow-id', {
input: { message: 'Count to five' },
stream: true,
selectedOutputs: ['agent1.content'] // 使用 blockName.attribute 格式
});
console.log('工作流结果:', result);
} catch (error) {
console.error('错误:', error);
}
}
```
The streaming response follows the Server-Sent Events (SSE) format:
```
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":"One"}
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", two"}
data: {"event":"done","success":true,"output":{},"metadata":{"duration":610}}
data: [DONE]
```
**React Streaming Example:**
```typescript
import { useState, useEffect } from 'react';
function StreamingWorkflow() {
const [output, setOutput] = useState('');
const [loading, setLoading] = useState(false);
const executeStreaming = async () => {
setLoading(true);
setOutput('');
// 重要提示:请从您的后端服务器发起此 API 调用,而不是从浏览器发起
// 切勿在客户端代码中暴露您的 API 密钥
const response = await fetch('https://sim.ai/api/workflows/WORKFLOW_ID/execute', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'X-API-Key': process.env.SIM_API_KEY! // 仅限服务器端环境变量
},
body: JSON.stringify({
message: '生成一个故事',
stream: true,
selectedOutputs: ['agent1.content']
})
});
const reader = response.body?.getReader();
const decoder = new TextDecoder();
while (reader) {
const { done, value } = await reader.read();
if (done) break;
const chunk = decoder.decode(value);
const lines = chunk.split('\n\n');
for (const line of lines) {
if (line.startsWith('data: ')) {
const data = line.slice(6);
if (data === '[DONE]') {
setLoading(false);
break;
}
try {
const parsed = JSON.parse(data);
if (parsed.chunk) {
setOutput(prev => prev + parsed.chunk);
} else if (parsed.event === 'done') {
console.log('执行完成:', parsed.metadata);
}
} catch (e) {
// 跳过无效的 JSON
}
}
}
}
};
return (
<div>
<button onClick={executeStreaming} disabled={loading}>
{loading ? '生成中...' : '开始流式处理'}
</button>
<div style={{ whiteSpace: 'pre-wrap' }}>{output}</div>
</div>
);
}
```
## Getting Your API Key
<Steps>
<Step title="登录 Sim">
访问 [Sim](https://sim.ai) 并登录您的账户。
<Step title="Log in to Sim">
Navigate to [Sim](https://sim.ai) and log in to your account.
</Step>
<Step title="打开您的工作流">
导航到您想要以编程方式执行的工作流。
<Step title="Open your workflow">
Navigate to the workflow you want to execute programmatically.
</Step>
<Step title="部署您的工作流">
如果尚未部署,请点击“部署”以部署您的工作流。
<Step title="Deploy your workflow">
Click on "Deploy" to deploy your workflow if it hasn't been deployed yet.
</Step>
<Step title="创建或选择一个 API 密钥">
在部署过程中,选择或创建一个 API 密钥。
<Step title="Create or select an API key">
During the deployment process, select or create an API key.
</Step>
<Step title="复制 API 密钥">
复制 API 密钥以在您的 TypeScript/JavaScript 应用程序中使用。
<Step title="Copy the API key">
Copy the API key to use in your TypeScript/JavaScript application.
</Step>
</Steps>
<Callout type="warning">
请确保您的 API 密钥安全,切勿将其提交到版本控制中。使用环境变量或安全配置管理。
Keep your API key secure and never commit it to version control. Use environment variables or secure configuration management.
</Callout>
## 要求
## Requirements
- Node.js 16+
- TypeScript 5.0+(适用于 TypeScript 项目)
- TypeScript 5.0+ (for TypeScript projects)
## TypeScript 支持
## TypeScript Support
SDK 是用 TypeScript 编写的,并提供完整的类型安全:
The SDK is written in TypeScript and provides full type safety:
```typescript
import {
@@ -586,22 +969,22 @@ import {
SimStudioError
} from 'simstudio-ts-sdk';
// Type-safe client initialization
// 类型安全的客户端初始化
const client: SimStudioClient = new SimStudioClient({
apiKey: process.env.SIMSTUDIO_API_KEY!
apiKey: process.env.SIM_API_KEY!
});
// Type-safe workflow execution
// 类型安全的工作流执行
const result: WorkflowExecutionResult = await client.executeWorkflow('workflow-id', {
input: {
message: 'Hello, TypeScript!'
message: '你好,TypeScript'
}
});
// Type-safe status checking
// 类型安全的状态检查
const status: WorkflowStatus = await client.getWorkflowStatus('workflow-id');
```
## 许可证
Apache-2.0
Apache-2.0

View File

@@ -38,6 +38,84 @@ curl -X POST \
成功的响应会返回来自执行器的序列化执行结果。错误会显示验证、认证或工作流失败的信息。
## 流式响应
启用实时流式传输以在生成时逐字符接收工作流输出。这对于向用户逐步显示 AI 响应非常有用。
### 请求参数
添加以下参数以启用流式传输:
- `stream` - 设置为 `true` 以启用服务器发送事件 (SSE) 流式传输
- `selectedOutputs` - 要流式传输的块输出数组(例如,`["agent1.content"]`
### 块输出格式
使用 `blockName.attribute` 格式指定要流式传输的块输出:
- 格式:`"blockName.attribute"`(例如,如果您想流式传输 Agent 1 块的内容,可以使用 `"agent1.content"`
- 块名称不区分大小写,空格会被忽略
### 示例请求
```bash
curl -X POST \
https://sim.ai/api/workflows/WORKFLOW_ID/execute \
-H 'Content-Type: application/json' \
-H 'X-API-Key: YOUR_KEY' \
-d '{
"message": "Count to five",
"stream": true,
"selectedOutputs": ["agent1.content"]
}'
```
### 响应格式
流式响应使用服务器发送事件 (SSE) 格式:
```
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":"One"}
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", two"}
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", three"}
data: {"event":"done","success":true,"output":{},"metadata":{"duration":610}}
data: [DONE]
```
每个事件包括:
- **流式块**`{"blockId": "...", "chunk": "text"}` - 实时生成的文本
- **最终事件**`{"event": "done", ...}` - 执行元数据和完整结果
- **终止符**`[DONE]` - 表示流结束
### 多块流式传输
当 `selectedOutputs` 包含多个块时,每个数据块都会标明它来自哪个块:
```bash
curl -X POST \
https://sim.ai/api/workflows/WORKFLOW_ID/execute \
-H 'Content-Type: application/json' \
-H 'X-API-Key: YOUR_KEY' \
-d '{
"message": "Process this request",
"stream": true,
"selectedOutputs": ["agent1.content", "agent2.content"]
}'
```
每个块中的 `blockId` 字段可让您将输出路由到正确的 UI 元素:
```
data: {"blockId":"agent1-uuid","chunk":"Processing..."}
data: {"blockId":"agent2-uuid","chunk":"Analyzing..."}
data: {"blockId":"agent1-uuid","chunk":" complete"}
```
## 输出参考
| 参考 | 描述 |
@@ -48,5 +126,5 @@ curl -X POST \
如果未定义输入格式,执行器仅在 `<api.input>` 处暴露原始 JSON。
<Callout type="warning">
一个工作流只能包含一个 API 触发器。更改后发布新部署,以确保端点保持最新。
一个工作流只能包含一个 API 触发器。更改后发布新部署,以确保端点保持最新。
</Callout>

View File

@@ -2231,8 +2231,8 @@ checksums:
d394ac42b56429e524dc5a771b0610b9:
meta/title: 9da9098244c6c7a0ebcc3009cef66c7b
content/0: 9218a2e190598690d0fc5c27c30f01bb
content/1: 8a3feb937915c3191f1eecb10b94297d
content/2: 99af1bfe8d1629acdb5a9229430af791
content/1: 6c88f52bdb4e4a5668d1b25b5f444f48
content/2: 7e827833339b6b4c6abdf154de7f9a0c
content/3: 391128dee61b5d0d43eba88567aaef42
content/4: 4d132e6346723ecf45c408afeab2757b
content/5: d3df764a69d2926d10aed65ad8693e9f
@@ -2254,79 +2254,155 @@ checksums:
content/21: bd0e851fdde30c0e94c00b60f85d655e
content/22: 837ca74ccf63f23333c54e010faf681c
content/23: 8fb33cfc314b86d35df8ea1b10466f20
content/24: 09e003c28fb1810e9afefe51324265fd
content/25: 07fb2d6b16c75839a32d383f12419ca5
content/26: 9fd0cd99a879360d355d91e9cfb41531
content/27: f6fed8ebf67ba12199b4474a754969ae
content/28: bcee3febe1be079e53aea841e2b08b3b
content/29: f00be560fcd4ff3f53d61c70c249597b
content/30: fa4fa1573c369fcc2eee57d7852caf9c
content/31: fa68c1f8c9ea3dba96b2ea7edb8680d7
content/32: 304e608d459ef53f308e6ea1f6f8b54a
content/33: cb63e267fb16a7aaeea45c4ca29bf697
content/34: f00be560fcd4ff3f53d61c70c249597b
content/35: f7c266db4d07d040f8f788be598476cf
content/36: cd306281b5136831335e6376edb1e822
content/37: afb7b7f27d48deb3154da26135e17fb8
content/38: 179000198c9cd78601b5c862e9c8659f
content/39: 28c37db52f39323e125fcaf0e60911db
content/40: 8de5041c3c93b70619ec1723f657757f
content/41: 07fb2d6b16c75839a32d383f12419ca5
content/42: 65db7855a79ab283c6409e81a7703d19
content/43: 191fb7087315702a36001c69d745ebed
content/44: f6113edfd7a0062af4d88bcf31a73f45
content/45: 1e84fc1eee794c20e3411b3a34a02278
content/46: ec31e300f79185f734d32b1cfaf8a137
content/47: f7ad301d02e8826921644a5268f13f32
content/48: 025d60fdaf93713ccb34abcbc71dfa2b
content/49: 70a9ece41fdad09f3a06ca0efdb92ae9
content/50: 356d67409ae0d82a72d052573314f660
content/51: bb172f1678686d9d49666c516716de24
content/52: 529711647eccfdf031dbb5bc70581986
content/53: 9a84c92505eb468916637fcf2cef70f2
content/54: 225bca3fb37bd38cd645e8a698abbfa9
content/55: 33b9b1e9744318597da4b925b0995be2
content/56: 6afe3b62e6d53c3dcd07149abcab4c05
content/57: b6363faee219321c16d41a9c3f8d3bdd
content/58: 24ef65dd034a2881a978d8d0065fb258
content/59: b8b23ab79a7eb32c6f8d5f49f43c51f6
content/60: be358297e2bbb9ab4689d11d072611d1
content/61: b2eaadc86870d2e64b55c89e0348ef93
content/62: 450265802cb0ba5b435b74b9cac1bf23
content/63: b735ede8764e4b2dfb25967e33ab5143
content/64: 0f881e586a03c4b916456c73fad48358
content/65: 62bbbeca4e0500062f5cdbbc1614dde0
content/66: 55d47e12745c1b0b62c9bdf6e8449730
content/67: 1d873c7ccd87f564e2b30387b40ee9e9
content/68: 3304a33dfb626c6e2267c062e8956a9d
content/69: 77256b36307e9f7293bd00063239c8ee
content/70: ac686382ccbb07d75b0f141af500dfd5
content/71: 38f7308105b0843792c8e2fb93e1895d
content/72: 62f6977928b2f596ed7d54383d1e779d
content/73: 3415d6c5ad1df56b212d69519bdf0fea
content/74: d1a104e667cd2284ab5b3fead4a6ba1d
content/75: a81d7cd4a644a0061dad3a5973b4fe06
content/76: 981447969a71fd038049e9d9f40f4f8c
content/77: 531941216d31cb1947367c3c02127baa
content/78: bf1afa789fdfa5815faaf43574341e90
content/79: 5f2fe55d098d4e4f438af595708b2280
content/80: 41b8f7cf8899a0e92e255a3f845f9584
content/81: 5040bab65fb6bb77862f8098d16afbb5
content/82: a88260a5b5e23da73e4534376adeb193
content/83: e5e2329cdc226186fe9d44767528a4a0
content/84: 1773624e9ac3d5132b505894ef51977e
content/85: d62c9575cc66feec7589fba95c9f7aee
content/86: 7af652c5407ae7e156ab27b21a4f26d3
content/87: 4aa69b29cca745389dea8cd74eba4f83
content/88: 46877074b69519165997fa0968169611
content/89: d8ebc69b18baf83689ba315e7b4946ea
content/90: ecd571818ddf3d31b08b80a25958a662
content/91: 7dcdf2fbf3fce3f94987046506e12a9b
content/24: f8fbd9375113651be0f2498bdacde0ef
content/25: 2c57d87589b65f785e0fbbda60d32e54
content/26: 2541eb37fca67a6d7c5a10f8067127a3
content/27: 9fd0cd99a879360d355d91e9cfb41531
content/28: f6fed8ebf67ba12199b4474a754969ae
content/29: bcee3febe1be079e53aea841e2b08b3b
content/30: f00be560fcd4ff3f53d61c70c249597b
content/31: fa4fa1573c369fcc2eee57d7852caf9c
content/32: fa68c1f8c9ea3dba96b2ea7edb8680d7
content/33: 304e608d459ef53f308e6ea1f6f8b54a
content/34: cb63e267fb16a7aaeea45c4ca29bf697
content/35: f00be560fcd4ff3f53d61c70c249597b
content/36: f7c266db4d07d040f8f788be598476cf
content/37: d93b320646fde160c0fdd1936ee63cfb
content/38: c76e2089a41880dd6feac759ec8867c2
content/39: 0d61b9631788e64d1c1335b08c907107
content/40: 5ec50e6f56bd0a9a55fae14fa02185d9
content/41: 47bdc3ba4908bf1ce3d1a0a8f646b339
content/42: 5e8af7125448a6021a6ea431486dd587
content/43: 15017685691db74889cc6116373e44a5
content/44: 4d4ad5d56e800e5d227a07339300fc7f
content/45: c035728b4b81d006a18ba9ba7b9c638d
content/46: f1c9ad60574d19a5f93c837ab9d88890
content/47: 2c57d87589b65f785e0fbbda60d32e54
content/48: e7019a0e12f7295893c5822356fc0df0
content/49: 5912d8d9df5bbe435579d8eb0677685c
content/50: 4e1da4edce56837c750ce8da4c0e6cf2
content/51: 3d35097bb958e6eddd6976aeb1fe9e41
content/52: 78dce98d48ba070dbe100ee2a94cb17d
content/53: 38ec85acf292485e3dd837a29208fd2c
content/54: 58d582d90c8715f5570f76fed2be508d
content/55: 7d2b7134d447172c502b5f40fc3b38e6
content/56: 4a71171863d7329da6813b94772c0d4e
content/57: 1900d5b89dbca22d7a455bdc3367f0f5
content/58: 45126feb4fc831922a7edabfa2d54e4a
content/59: 65db7855a79ab283c6409e81a7703d19
content/60: 191fb7087315702a36001c69d745ebed
content/61: f6113edfd7a0062af4d88bcf31a73f45
content/62: 1e84fc1eee794c20e3411b3a34a02278
content/63: ec31e300f79185f734d32b1cfaf8a137
content/64: f7ad301d02e8826921644a5268f13f32
content/65: 025d60fdaf93713ccb34abcbc71dfa2b
content/66: 70a9ece41fdad09f3a06ca0efdb92ae9
content/67: 356d67409ae0d82a72d052573314f660
content/68: 5a80933fb21deea17a0a200564f0111b
content/69: 9527ba2ab5ddd8001baaaaf25f1a7acc
content/70: bb172f1678686d9d49666c516716de24
content/71: 529711647eccfdf031dbb5bc70581986
content/72: baa408b1603f35a8e24dd60b88773c72
content/73: c42a9f19d0678d8d1a36cf1f93e4a5ba
content/74: f6180f2341e8a7ae24afb05d7a185340
content/75: 8196e101e443ec2aac13cefd90a6d454
content/76: 9a84c92505eb468916637fcf2cef70f2
content/77: 225bca3fb37bd38cd645e8a698abbfa9
content/78: 7431c09b430effd69de843ee0fbaafe8
content/79: 33b9b1e9744318597da4b925b0995be2
content/80: 6afe3b62e6d53c3dcd07149abcab4c05
content/81: b6363faee219321c16d41a9c3f8d3bdd
content/82: 2449c8e8f55e2bf3f732527352d35c9f
content/83: b8b23ab79a7eb32c6f8d5f49f43c51f6
content/84: be358297e2bbb9ab4689d11d072611d1
content/85: eb774a8a86d778153905b0f6cdcdf517
content/86: 450265802cb0ba5b435b74b9cac1bf23
content/87: b735ede8764e4b2dfb25967e33ab5143
content/88: 0f881e586a03c4b916456c73fad48358
content/89: f51639ab2b7ccac72b850e2064e694e9
content/90: 55d47e12745c1b0b62c9bdf6e8449730
content/91: e6223d6aa9efa444282e58d7d9a99ced
content/92: 3304a33dfb626c6e2267c062e8956a9d
content/93: 77256b36307e9f7293bd00063239c8ee
content/94: ac686382ccbb07d75b0f141af500dfd5
content/95: 5610b6538a29672335b572d6f35d0657
content/96: 62f6977928b2f596ed7d54383d1e779d
content/97: 3415d6c5ad1df56b212d69519bdf0fea
content/98: 6bd60468d8cc072c5fe4214481fa9f60
content/99: a81d7cd4a644a0061dad3a5973b4fe06
content/100: 981447969a71fd038049e9d9f40f4f8c
content/101: 531941216d31cb1947367c3c02127baa
content/102: bf1afa789fdfa5815faaf43574341e90
content/103: 5f2fe55d098d4e4f438af595708b2280
content/104: 41b8f7cf8899a0e92e255a3f845f9584
content/105: 61ddd890032078ffd2da931b1d153b6d
content/106: 7873aa7487bc3e8a4826d65c1760a4a0
content/107: 98182d9aabe14d5bad43a5ee76a75eab
content/108: 2bdb01e4bcb08b1d99f192acf8e2fba7
content/109: 7079d9c00b1e1882c329b7e9b8f74552
content/110: 0f9d65eaf6e8de43c3d5fa7e62bc838d
content/111: 58c8e9d2d0ac37efd958203b8fbc8193
content/112: 7859d36a7a6d0122c0818b28ee29aa3e
content/113: ce185e7b041b8f95ebc11370d3e0aad9
content/114: 701e9bf4fd4d0669da0584eac5bd96e0
content/115: d1bab8ec5a51a9da5464eb47e2a16b50
content/116: da658275cc81a20f9cf7e4c66c7af1e3
content/117: 377d7c99a5df4b72166946573f7210b8
content/118: 3afc03a5ab1dc9db2bfa092b0ac4826a
content/119: 18ddfcaf2be4a6f1d9819407dad9ce7c
content/120: 2f6263b2e95f09f7e4842453f4bf4a0a
content/121: 4603578d6b314b662f45564a34ca430d
content/122: cf4c97eb254d0bd6ea6633344621c2c2
content/123: 7b4640989fab002039936156f857eb21
content/124: 65ca9f08745b47b4cce8ea8247d043bf
content/125: 162b4180611ff0a53b782e4dc8109293
content/126: 6b367a189eb53cb198e3666023def89c
content/127: dbb2125cefcf618849600c1eccae8a64
content/128: 04eedda0da3767b06e6017c559e05414
content/129: 661688450606eb09d8faee1468e88331
content/130: 8ff8367c3246103b3e3e02499e34ae0b
content/131: 44678bda9166f746da1d61b694ced482
content/132: a5e75db27c0a901f4cacf6598f450e6c
content/133: d1bab8ec5a51a9da5464eb47e2a16b50
content/134: da658275cc81a20f9cf7e4c66c7af1e3
content/135: 377d7c99a5df4b72166946573f7210b8
content/136: 3afc03a5ab1dc9db2bfa092b0ac4826a
content/137: 18ddfcaf2be4a6f1d9819407dad9ce7c
content/138: 2f6263b2e95f09f7e4842453f4bf4a0a
content/139: 4603578d6b314b662f45564a34ca430d
content/140: cf4c97eb254d0bd6ea6633344621c2c2
content/141: 7b4640989fab002039936156f857eb21
content/142: 65ca9f08745b47b4cce8ea8247d043bf
content/143: 162b4180611ff0a53b782e4dc8109293
content/144: 6b367a189eb53cb198e3666023def89c
content/145: dbb2125cefcf618849600c1eccae8a64
content/146: 04eedda0da3767b06e6017c559e05414
content/147: 661688450606eb09d8faee1468e88331
content/148: 8ff8367c3246103b3e3e02499e34ae0b
content/149: 44678bda9166f746da1d61b694ced482
content/150: 192a89879084dd7a74a6f44bcecae958
content/151: 41c2bb95317d7c0421817a2b1a68cc09
content/152: 4c95f9fa55f698f220577380dff95011
content/153: 9ef273d776aada1b2cff3452f08ff985
content/154: 100e12673551d4ceb5b906b1b9c65059
content/155: ce253674cd7c49320203cda2bdd3685b
content/156: 8910afcea8c205a28256eb30de6a1f26
content/157: 4d7ad757d2c70fdff7834146d38dddd8
content/158: a88260a5b5e23da73e4534376adeb193
content/159: e5e2329cdc226186fe9d44767528a4a0
content/160: 1773624e9ac3d5132b505894ef51977e
content/161: d62c9575cc66feec7589fba95c9f7aee
content/162: 7af652c5407ae7e156ab27b21a4f26d3
content/163: 4aa69b29cca745389dea8cd74eba4f83
content/164: 46877074b69519165997fa0968169611
content/165: 2e81908c18033109ac82a054b3fafd3d
content/166: ecd571818ddf3d31b08b80a25958a662
content/167: 7dcdf2fbf3fce3f94987046506e12a9b
27578f1315b6f1b7418d5e0d6042722e:
meta/title: 8c555594662512e95f28e20d3880f186
content/0: 9218a2e190598690d0fc5c27c30f01bb
content/1: feca29d7cbb17f461bc8706f142cb475
content/2: 65705e1bef9ddf2674454c20e77af61f
content/2: 9cb58e08402fc80050ad6a62cae3f643
content/3: 391128dee61b5d0d43eba88567aaef42
content/4: fa77bab0a8660a7999bf3104921aac5c
content/5: e8839cfb872185cea76973caaa7f84e0
@@ -2342,67 +2418,107 @@ checksums:
content/15: 64005abb7b5c1c3edef8970a8a7d17b2
content/16: 837ca74ccf63f23333c54e010faf681c
content/17: 626054376e08522e7195a60c34db9af8
content/18: 03c715df3c784e92ce1c0ce6a4dcd2e3
content/19: dcb92b9a1f222393f2e81cdae239885c
content/20: 2f5c7e73763a1884893739283f0d0659
content/21: f6fed8ebf67ba12199b4474a754969ae
content/22: c8f9a1d43885f2b9fe8b64c79d8af8b8
content/23: e1a2ca39583549a731d942082e1fa07c
content/24: 14e077bdb64d87457870efa215384654
content/25: c2e86eaf4b7d1cd53ed8172264337cc9
content/26: 304e608d459ef53f308e6ea1f6f8b54a
content/27: 9d04294f8385211535ed7622d164871f
content/28: e1a2ca39583549a731d942082e1fa07c
content/29: 279c20e11af33abb94993e8ea3e80669
content/30: eec7d8395f8cf305106deb7b25384ecf
content/31: 921824b44c391f8a0cdc5ce4cd283e77
content/32: d5aaccb9399a1255f986b703921594e5
content/33: dba855cc28255e4576026e3da0cdf05b
content/34: 17fdd93c6df75b108e352a62a195bc73
content/35: dcb92b9a1f222393f2e81cdae239885c
content/36: fb6fddfdf4753a36c7878ef60b345822
content/37: 191fb7087315702a36001c69d745ebed
content/38: 1ffef0a4e0d6a6bbca85776c113e1164
content/39: 61caafaf79e863df9525c4baf72c14e1
content/40: ec31e300f79185f734d32b1cfaf8a137
content/41: 65a172d64ffca3b03c6e0ed08f0bd821
content/42: 2db387754d7fb3539bcb986dfaac1c8c
content/43: e118d997ba48a5230ec70a564d436860
content/44: 77268362a748dafad471f31acfd230dc
content/45: b55b3773df2dfba66b6e675db7e2470e
content/46: 70a9ece41fdad09f3a06ca0efdb92ae9
content/47: 646ee615d86faf3b6a8da03115a30efa
content/48: bb172f1678686d9d49666c516716de24
content/49: a025b3b746d72e0f676f58703ee19a47
content/50: 9a84c92505eb468916637fcf2cef70f2
content/51: a4c78d85ed9be63b07b657166510f440
content/52: 33b9b1e9744318597da4b925b0995be2
content/53: 6afe3b62e6d53c3dcd07149abcab4c05
content/54: b6363faee219321c16d41a9c3f8d3bdd
content/55: f939bc99e05d04e1d52bf4b9ec3f1825
content/56: b8b23ab79a7eb32c6f8d5f49f43c51f6
content/57: be358297e2bbb9ab4689d11d072611d1
content/58: d8fcefba15a99bf4a9cf71c985097677
content/59: 7d098f0349c782f389431377ee512e92
content/60: 22b39537f6a104803389469d211154e4
content/61: 5dc147f9fe5e8117dfa6c94808c4ff54
content/62: f29d6bfd74ba3fee0b90180f620b4f47
content/63: 2a59466500b62e57481fe27692a3ed0f
content/64: d3ac9ea2a213cafb1f871dda8f6e6fe0
content/65: 450265802cb0ba5b435b74b9cac1bf23
content/66: b735ede8764e4b2dfb25967e33ab5143
content/67: 0f881e586a03c4b916456c73fad48358
content/68: 3f643fb43f3a022a449ded1e7c4db8bf
content/69: 55d47e12745c1b0b62c9bdf6e8449730
content/70: 166b3975e39841707381880ae4df3984
content/71: 3304a33dfb626c6e2267c062e8956a9d
content/72: a88260a5b5e23da73e4534376adeb193
content/73: cc31ae653c5642b223ec634888de29c6
content/74: 1773624e9ac3d5132b505894ef51977e
content/75: d62c9575cc66feec7589fba95c9f7aee
content/76: 8df5939abc771b5d24c115ef20d42d6f
content/77: ecd571818ddf3d31b08b80a25958a662
content/78: 7dcdf2fbf3fce3f94987046506e12a9b
content/18: 12153919e0229ac0a3699de043eae2a2
content/19: 59ceca96004d0746448717245eb65c5c
content/20: a0ff152e09498effe90572fe5cdfad1b
content/21: 2f5c7e73763a1884893739283f0d0659
content/22: f6fed8ebf67ba12199b4474a754969ae
content/23: c8f9a1d43885f2b9fe8b64c79d8af8b8
content/24: e1a2ca39583549a731d942082e1fa07c
content/25: 14e077bdb64d87457870efa215384654
content/26: c2e86eaf4b7d1cd53ed8172264337cc9
content/27: 304e608d459ef53f308e6ea1f6f8b54a
content/28: 9d04294f8385211535ed7622d164871f
content/29: e1a2ca39583549a731d942082e1fa07c
content/30: 279c20e11af33abb94993e8ea3e80669
content/31: 9e772c161a4b008c2f1db15a967d07ab
content/32: c76e2089a41880dd6feac759ec8867c2
content/33: 5d9a7b1e681cbe8f02def7eefabb0ac5
content/34: b4e0e90d40a60a024f64f80b193dcb48
content/35: b9f46c03c91c1070dd3ca0eba461f29b
content/36: fbecf63d14b56039ba44471f7a8afd4a
content/37: 58701f4ec097582ee105714a9363ccbe
content/38: 4d4ad5d56e800e5d227a07339300fc7f
content/39: 7f2a42a752279d7871064a21d0891b73
content/40: 8462e2271506b0545c62e5f70865a2f4
content/41: 59ceca96004d0746448717245eb65c5c
content/42: e7019a0e12f7295893c5822356fc0df0
content/43: 29d376146cd1149025028c61eb33e7ab
content/44: 4e1da4edce56837c750ce8da4c0e6cf2
content/45: 666a62d9fd54735b2adcad6277b3e07f
content/46: db012cfc3749d025f1dd40b5db1d9d63
content/47: 478fe7c3fbdd5e7d779691c9a09795c9
content/48: 58d582d90c8715f5570f76fed2be508d
content/49: 710baf5cf18c21cc284e70df97b36f40
content/50: 6363bbb118f3f51ca1b1acf3e9ec2f7c
content/51: 1900d5b89dbca22d7a455bdc3367f0f5
content/52: 959f29f44825109bf4bb16129896a8dd
content/53: fb6fddfdf4753a36c7878ef60b345822
content/54: 191fb7087315702a36001c69d745ebed
content/55: 1ffef0a4e0d6a6bbca85776c113e1164
content/56: 61caafaf79e863df9525c4baf72c14e1
content/57: ec31e300f79185f734d32b1cfaf8a137
content/58: 65a172d64ffca3b03c6e0ed08f0bd821
content/59: 2db387754d7fb3539bcb986dfaac1c8c
content/60: e118d997ba48a5230ec70a564d436860
content/61: 77268362a748dafad471f31acfd230dc
content/62: b55b3773df2dfba66b6e675db7e2470e
content/63: 70a9ece41fdad09f3a06ca0efdb92ae9
content/64: 646ee615d86faf3b6a8da03115a30efa
content/65: 5a80933fb21deea17a0a200564f0111b
content/66: a82d5e5fad0fbfd60ca97e5312d11941
content/67: bb172f1678686d9d49666c516716de24
content/68: a025b3b746d72e0f676f58703ee19a47
content/69: baa408b1603f35a8e24dd60b88773c72
content/70: c0cc113d0001826984f9c096c79cd18b
content/71: f6180f2341e8a7ae24afb05d7a185340
content/72: 3d414a5669f152cd296af27b61104858
content/73: 9a84c92505eb468916637fcf2cef70f2
content/74: a4c78d85ed9be63b07b657166510f440
content/75: 7431c09b430effd69de843ee0fbaafe8
content/76: 33b9b1e9744318597da4b925b0995be2
content/77: 6afe3b62e6d53c3dcd07149abcab4c05
content/78: b6363faee219321c16d41a9c3f8d3bdd
content/79: 08410ce9f0ec358b3c7230a56bc66399
content/80: b8b23ab79a7eb32c6f8d5f49f43c51f6
content/81: be358297e2bbb9ab4689d11d072611d1
content/82: 09fea7c0d742a0eefa77e982e848de6c
content/83: 7d098f0349c782f389431377ee512e92
content/84: 22b39537f6a104803389469d211154e4
content/85: d9ec74ab28b264d76f797fdae7c8f3d3
content/86: f29d6bfd74ba3fee0b90180f620b4f47
content/87: 2a59466500b62e57481fe27692a3ed0f
content/88: cbbb123fc3a12bf2ab72dc1bbe373a6e
content/89: 7873aa7487bc3e8a4826d65c1760a4a0
content/90: 98182d9aabe14d5bad43a5ee76a75eab
content/91: 67bfa8ae3e22d9a949f08c79a40b8df5
content/92: 7079d9c00b1e1882c329b7e9b8f74552
content/93: 0f9d65eaf6e8de43c3d5fa7e62bc838d
content/94: bcf0ce93a4493586ad32c20d9d2b285c
content/95: 7859d36a7a6d0122c0818b28ee29aa3e
content/96: ce185e7b041b8f95ebc11370d3e0aad9
content/97: dae96b41f0c029b464f02ac65d3c5796
content/98: 41c2bb95317d7c0421817a2b1a68cc09
content/99: 4c95f9fa55f698f220577380dff95011
content/100: 6695bd47a05f9963134d8a71abb3d298
content/101: 100e12673551d4ceb5b906b1b9c65059
content/102: ce253674cd7c49320203cda2bdd3685b
content/103: 94d4346a735149c2a83f6d2a21b8ab4c
content/104: 3ee4b16b8204ef3b5b7c0322ff636fab
content/105: 450265802cb0ba5b435b74b9cac1bf23
content/106: b735ede8764e4b2dfb25967e33ab5143
content/107: 0f881e586a03c4b916456c73fad48358
content/108: 4570af52d41ecda8d91e6bbe2bc19891
content/109: 55d47e12745c1b0b62c9bdf6e8449730
content/110: 82507d357ec8766f0173b9b1081c4c56
content/111: 3304a33dfb626c6e2267c062e8956a9d
content/112: a88260a5b5e23da73e4534376adeb193
content/113: cc31ae653c5642b223ec634888de29c6
content/114: 1773624e9ac3d5132b505894ef51977e
content/115: d62c9575cc66feec7589fba95c9f7aee
content/116: 8df5939abc771b5d24c115ef20d42d6f
content/117: ecd571818ddf3d31b08b80a25958a662
content/118: 7dcdf2fbf3fce3f94987046506e12a9b
004fe5dc5ca33719cb175f3619fe5208:
meta/title: be754b00d8a2c13c561e314f6f526515
content/0: 7e581dbf3e581d503ac94f7fb7938b1f
@@ -3970,7 +4086,25 @@ checksums:
content/7: e73f4b831f5b77c71d7d86c83abcbf11
content/8: 07e064793f3e0bbcb02c4dc6083b6daa
content/9: a702b191c3f94458bee880d33853e0cb
content/10: ce110ab5da3ff96f8cbf96ce3376fc51
content/11: 83f9b3ab46b0501c8eb3989bec3f4f1b
content/12: e00be80effb71b0acb014f9aa53dfbe1
content/13: 847a381137856ded9faa5994fbc489fb
content/10: c497057cbb9dd53599071f8550f327cd
content/11: cc6e48f85d5c6bfc05f846341f2d5cc9
content/12: 8a80a6a97da9bf375fac565f1caabb49
content/13: 098cc8e062187eb877fe5e172a4aa467
content/14: e452a7cb33d7cf2f7cf1804703edaa20
content/15: 466cfd61b1d0fcd8fc93d867dfd0f3e3
content/16: 377572316021236994f444e88949ef34
content/17: 54852933b2cbe3deb3b1c3059dba6a15
content/18: 9e66b045763abe053a3ba8d2c23e9aa1
content/19: d34f0950591e3beb085e99db64d07d2f
content/20: 8677ef07618f7289b04fef3cce8bf745
content/21: c0e6d2790e369569e7f272a5ec9ae21a
content/22: 93643a0d9d9745f131e4eabf7ead2018
content/23: 89c7da6d2e8fbc25e303a7381e147237
content/24: a8ec63597dc3a3564bc5f0c3a6e5f42c
content/25: 379618989b6cd427b319cfdab523297d
content/26: bc4c2e699a7514771276e90e9aee53ba
content/27: 38e14193b679ef774c3db93d399e700e
content/28: ce110ab5da3ff96f8cbf96ce3376fc51
content/29: 83f9b3ab46b0501c8eb3989bec3f4f1b
content/30: e00be80effb71b0acb014f9aa53dfbe1
content/31: 847a381137856ded9faa5994fbc489fb

View File

@@ -9,7 +9,7 @@ export function cn(...inputs: ClassValue[]) {
}
/**
* Get the full URL for an asset stored in Vercel Blob or local fallback
* Get the full URL for an asset stored in Vercel Blob
* - If CDN is configured (NEXT_PUBLIC_BLOB_BASE_URL), uses CDN URL
* - Otherwise falls back to local static assets served from root path
*/
@@ -20,12 +20,3 @@ export function getAssetUrl(filename: string) {
}
return `/${filename}`
}
/**
* Get the full URL for a video asset stored in Vercel Blob or local fallback
* - If CDN is configured (NEXT_PUBLIC_BLOB_BASE_URL), uses CDN URL
* - Otherwise falls back to local static assets served from root path
*/
export function getVideoUrl(filename: string) {
return getAssetUrl(filename)
}
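For reference, a minimal sketch of the fallback behaviour described in the comment above; the exact CDN URL join is an assumption, since this hunk only shows the local-path branch of getAssetUrl.

function getAssetUrlSketch(filename: string): string {
  // Assumption: NEXT_PUBLIC_BLOB_BASE_URL is the only switch between CDN and local assets
  const base = process.env.NEXT_PUBLIC_BLOB_BASE_URL
  if (base) {
    return `${base}/${filename}` // CDN configured: serve from Vercel Blob / CDN
  }
  return `/${filename}` // no CDN: static asset served from the root path
}

// getAssetUrlSketch('twitter/hasan.jpg')
//   -> 'https://<cdn-host>/twitter/hasan.jpg' when the CDN is configured, otherwise '/twitter/hasan.jpg'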

View File

@@ -2,7 +2,6 @@
import { useEffect, useState } from 'react'
import Image from 'next/image'
import { getAssetUrl } from '@/lib/utils'
import { inter } from '@/app/fonts/inter'
interface Testimonial {
@@ -14,7 +13,6 @@ interface Testimonial {
profileImage: string
}
// Import all testimonials
const allTestimonials: Testimonial[] = [
{
text: "🚨 BREAKING: This startup just dropped the fastest way to build AI agents.\n\nThis Figma-like canvas to build agents will blow your mind.\n\nHere's why this is the best tool for building AI agents:",
@@ -22,7 +20,7 @@ const allTestimonials: Testimonial[] = [
username: '@hasantoxr',
viewCount: '515k',
tweetUrl: 'https://x.com/hasantoxr/status/1912909502036525271',
profileImage: getAssetUrl('twitter/hasan.jpg'),
profileImage: '/twitter/hasan.jpg',
},
{
text: "Drag-and-drop AI workflows for devs who'd rather build agents than babysit them.",
@@ -30,7 +28,7 @@ const allTestimonials: Testimonial[] = [
username: '@GithubProjects',
viewCount: '90.4k',
tweetUrl: 'https://x.com/GithubProjects/status/1906383555707490499',
profileImage: getAssetUrl('twitter/github-projects.jpg'),
profileImage: '/twitter/github-projects.jpg',
},
{
text: "🚨 BREAKING: This startup just dropped the fastest way to build AI agents.\n\nThis Figma-like canvas to build agents will blow your mind.\n\nHere's why this is the best tool for building AI agents:",
@@ -38,7 +36,7 @@ const allTestimonials: Testimonial[] = [
username: '@lazukars',
viewCount: '47.4k',
tweetUrl: 'https://x.com/lazukars/status/1913136390503600575',
profileImage: getAssetUrl('twitter/lazukars.png'),
profileImage: '/twitter/lazukars.png',
},
{
text: 'omfggggg this is the zapier of agent building\n\ni always believed that building agents and using ai should not be limited to technical people. i think this solves just that\n\nthe fact that this is also open source makes me so optimistic about the future of building with ai :)))\n\ncongrats @karabegemir & @typingwala !!!',
@@ -46,7 +44,7 @@ const allTestimonials: Testimonial[] = [
username: '@nizzyabi',
viewCount: '6,269',
tweetUrl: 'https://x.com/nizzyabi/status/1907864421227180368',
profileImage: getAssetUrl('twitter/nizzy.jpg'),
profileImage: '/twitter/nizzy.jpg',
},
{
text: 'A very good looking agent workflow builder 🔥 and open source!',
@@ -54,7 +52,7 @@ const allTestimonials: Testimonial[] = [
username: '@xyflowdev',
viewCount: '3,246',
tweetUrl: 'https://x.com/xyflowdev/status/1909501499719438670',
profileImage: getAssetUrl('twitter/xyflow.jpg'),
profileImage: '/twitter/xyflow.jpg',
},
{
text: "One of the best products I've seen in the space, and the hustle and grind I've seen from @karabegemir and @typingwala is insane. Sim is positioned to build something game-changing, and there's no better team for the job.\n\nCongrats on the launch 🚀 🎊 great things ahead!",
@@ -62,7 +60,7 @@ const allTestimonials: Testimonial[] = [
username: '@firestorm776',
viewCount: '1,256',
tweetUrl: 'https://x.com/firestorm776/status/1907896097735061598',
profileImage: getAssetUrl('twitter/samarth.jpg'),
profileImage: '/twitter/samarth.jpg',
},
{
text: 'lfgg got access to @simstudioai via @zerodotemail 😎',
@@ -70,7 +68,7 @@ const allTestimonials: Testimonial[] = [
username: '@nizzyabi',
viewCount: '1,762',
tweetUrl: 'https://x.com/nizzyabi/status/1910482357821595944',
profileImage: getAssetUrl('twitter/nizzy.jpg'),
profileImage: '/twitter/nizzy.jpg',
},
{
text: 'Feels like we\'re finally getting a "Photoshop moment" for AI devs—visual, intuitive, and fast enough to keep up with ideas mid-flow.',
@@ -78,7 +76,7 @@ const allTestimonials: Testimonial[] = [
username: '@syamrajk',
viewCount: '2,784',
tweetUrl: 'https://x.com/syamrajk/status/1912911980110946491',
profileImage: getAssetUrl('twitter/syamrajk.jpg'),
profileImage: '/twitter/syamrajk.jpg',
},
{
text: 'The use cases are endless. Great work @simstudioai',
@@ -86,7 +84,7 @@ const allTestimonials: Testimonial[] = [
username: '@daniel_zkim',
viewCount: '103',
tweetUrl: 'https://x.com/daniel_zkim/status/1907891273664782708',
profileImage: getAssetUrl('twitter/daniel.jpg'),
profileImage: '/twitter/daniel.jpg',
},
]
@@ -95,11 +93,9 @@ export default function Testimonials() {
const [isTransitioning, setIsTransitioning] = useState(false)
const [isPaused, setIsPaused] = useState(false)
// Create an extended array for smooth infinite scrolling
const extendedTestimonials = [...allTestimonials, ...allTestimonials]
useEffect(() => {
// Set up automatic sliding every 3 seconds
const interval = setInterval(() => {
if (!isPaused) {
setIsTransitioning(true)
@@ -110,17 +106,15 @@ export default function Testimonials() {
return () => clearInterval(interval)
}, [isPaused])
// Reset position when reaching the end for infinite loop
useEffect(() => {
if (currentIndex >= allTestimonials.length) {
setTimeout(() => {
setIsTransitioning(false)
setCurrentIndex(0)
}, 500) // Match transition duration
}, 500)
}
}, [currentIndex])
// Calculate the transform value
const getTransformValue = () => {
// Each card unit (card + separator) takes exactly 25% width
return `translateX(-${currentIndex * 25}%)`

View File

@@ -403,7 +403,10 @@ export function mockExecutionDependencies() {
provider: 'provider',
providerConfig: 'providerConfig',
},
workflow: { id: 'id', userId: 'userId' },
workflow: {
id: 'id',
userId: 'userId',
},
workflowSchedule: {
id: 'id',
workflowId: 'workflowId',

View File

@@ -27,7 +27,7 @@ describe('Chat Identifier API Route', () => {
const mockAddCorsHeaders = vi.fn().mockImplementation((response) => response)
const mockValidateChatAuth = vi.fn().mockResolvedValue({ authorized: true })
const mockSetChatAuthCookie = vi.fn()
const mockExecuteWorkflowForChat = vi.fn().mockResolvedValue(createMockStream())
const mockCreateStreamingResponse = vi.fn().mockResolvedValue(createMockStream())
const mockChatResult = [
{
@@ -72,7 +72,16 @@ describe('Chat Identifier API Route', () => {
validateChatAuth: mockValidateChatAuth,
setChatAuthCookie: mockSetChatAuthCookie,
validateAuthToken: vi.fn().mockReturnValue(true),
executeWorkflowForChat: mockExecuteWorkflowForChat,
}))
vi.doMock('@/lib/workflows/streaming', () => ({
createStreamingResponse: mockCreateStreamingResponse,
SSE_HEADERS: {
'Content-Type': 'text/event-stream',
'Cache-Control': 'no-cache',
Connection: 'keep-alive',
'X-Accel-Buffering': 'no',
},
}))
vi.doMock('@/lib/logs/console/logger', () => ({
@@ -369,8 +378,23 @@ describe('Chat Identifier API Route', () => {
expect(response.headers.get('Cache-Control')).toBe('no-cache')
expect(response.headers.get('Connection')).toBe('keep-alive')
// Verify executeWorkflowForChat was called with correct parameters
expect(mockExecuteWorkflowForChat).toHaveBeenCalledWith('chat-id', 'Hello world', 'conv-123')
// Verify createStreamingResponse was called with correct workflow info
expect(mockCreateStreamingResponse).toHaveBeenCalledWith(
expect.objectContaining({
workflow: expect.objectContaining({
id: 'workflow-id',
userId: 'user-id',
}),
input: expect.objectContaining({
input: 'Hello world',
conversationId: 'conv-123',
}),
streamConfig: expect.objectContaining({
isSecureMode: true,
workflowTriggerType: 'chat',
}),
})
)
})
it('should handle streaming response body correctly', async () => {
@@ -399,8 +423,8 @@ describe('Chat Identifier API Route', () => {
})
it('should handle workflow execution errors gracefully', async () => {
const originalExecuteWorkflow = mockExecuteWorkflowForChat.getMockImplementation()
mockExecuteWorkflowForChat.mockImplementationOnce(async () => {
const originalStreamingResponse = mockCreateStreamingResponse.getMockImplementation()
mockCreateStreamingResponse.mockImplementationOnce(async () => {
throw new Error('Execution failed')
})
@@ -417,8 +441,8 @@ describe('Chat Identifier API Route', () => {
expect(data).toHaveProperty('error')
expect(data).toHaveProperty('message', 'Execution failed')
if (originalExecuteWorkflow) {
mockExecuteWorkflowForChat.mockImplementation(originalExecuteWorkflow)
if (originalStreamingResponse) {
mockCreateStreamingResponse.mockImplementation(originalStreamingResponse)
}
})
@@ -443,7 +467,7 @@ describe('Chat Identifier API Route', () => {
expect(data).toHaveProperty('message', 'Invalid request body')
})
it('should pass conversationId to executeWorkflowForChat when provided', async () => {
it('should pass conversationId to streaming execution when provided', async () => {
const req = createMockRequest('POST', {
input: 'Hello world',
conversationId: 'test-conversation-123',
@@ -454,10 +478,13 @@ describe('Chat Identifier API Route', () => {
await POST(req, { params })
expect(mockExecuteWorkflowForChat).toHaveBeenCalledWith(
'chat-id',
'Hello world',
'test-conversation-123'
expect(mockCreateStreamingResponse).toHaveBeenCalledWith(
expect.objectContaining({
input: expect.objectContaining({
input: 'Hello world',
conversationId: 'test-conversation-123',
}),
})
)
})
@@ -469,7 +496,13 @@ describe('Chat Identifier API Route', () => {
await POST(req, { params })
expect(mockExecuteWorkflowForChat).toHaveBeenCalledWith('chat-id', 'Hello world', undefined)
expect(mockCreateStreamingResponse).toHaveBeenCalledWith(
expect.objectContaining({
input: expect.objectContaining({
input: 'Hello world',
}),
})
)
})
})
})

View File

@@ -6,7 +6,6 @@ import { createLogger } from '@/lib/logs/console/logger'
import { generateRequestId } from '@/lib/utils'
import {
addCorsHeaders,
executeWorkflowForChat,
setChatAuthCookie,
validateAuthToken,
validateChatAuth,
@@ -15,6 +14,9 @@ import { createErrorResponse, createSuccessResponse } from '@/app/api/workflows/
const logger = createLogger('ChatIdentifierAPI')
export const dynamic = 'force-dynamic'
export const runtime = 'nodejs'
// This endpoint handles chat interactions via the identifier
export async function POST(
request: NextRequest,
@@ -106,18 +108,37 @@ export async function POST(
}
try {
// Execute workflow with structured input (input + conversationId for context)
const result = await executeWorkflowForChat(deployment.id, input, conversationId)
// Transform outputConfigs to selectedOutputs format (blockId_attribute format)
const selectedOutputs: string[] = []
if (deployment.outputConfigs && Array.isArray(deployment.outputConfigs)) {
for (const config of deployment.outputConfigs) {
const outputId = config.path
? `${config.blockId}_${config.path}`
: `${config.blockId}_content`
selectedOutputs.push(outputId)
}
}
// The result is always a ReadableStream that we can pipe to the client
const streamResponse = new NextResponse(result, {
status: 200,
headers: {
'Content-Type': 'text/event-stream',
'Cache-Control': 'no-cache',
Connection: 'keep-alive',
'X-Accel-Buffering': 'no',
const { createStreamingResponse } = await import('@/lib/workflows/streaming')
const { SSE_HEADERS } = await import('@/lib/utils')
const { createFilteredResult } = await import('@/app/api/workflows/[id]/execute/route')
const stream = await createStreamingResponse({
requestId,
workflow: { id: deployment.workflowId, userId: deployment.userId, isDeployed: true },
input: { input, conversationId }, // Format for chat_trigger
executingUserId: deployment.userId, // Use workflow owner's ID for chat deployments
streamConfig: {
selectedOutputs,
isSecureMode: true,
workflowTriggerType: 'chat',
},
createFilteredResult,
})
const streamResponse = new NextResponse(stream, {
status: 200,
headers: SSE_HEADERS,
})
return addCorsHeaders(streamResponse, request)
} catch (error: any) {

View File

@@ -416,7 +416,7 @@ describe('Chat API Utils', () => {
execution: executionResult,
}
// Simulate the type extraction logic from executeWorkflowForChat
// Test that streaming execution wraps the result correctly
const extractedFromStreaming =
streamingResult && typeof streamingResult === 'object' && 'execution' in streamingResult
? streamingResult.execution

View File

@@ -1,29 +1,11 @@
import { db } from '@sim/db'
import { chat, userStats, workflow } from '@sim/db/schema'
import { eq, sql } from 'drizzle-orm'
import { chat, workflow } from '@sim/db/schema'
import { eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { v4 as uuidv4 } from 'uuid'
import { checkServerSideUsageLimits } from '@/lib/billing'
import { isDev } from '@/lib/environment'
import { getPersonalAndWorkspaceEnv } from '@/lib/environment/utils'
import { createLogger } from '@/lib/logs/console/logger'
import { LoggingSession } from '@/lib/logs/execution/logging-session'
import { buildTraceSpans } from '@/lib/logs/execution/trace-spans/trace-spans'
import { hasAdminPermission } from '@/lib/permissions/utils'
import { processStreamingBlockLogs } from '@/lib/tokenization'
import { decryptSecret, generateRequestId } from '@/lib/utils'
import { TriggerUtils } from '@/lib/workflows/triggers'
import { CHAT_ERROR_MESSAGES } from '@/app/chat/constants'
import { getBlock } from '@/blocks'
import { Executor } from '@/executor'
import type { BlockLog, ExecutionResult } from '@/executor/types'
import { Serializer } from '@/serializer'
import { mergeSubblockState } from '@/stores/workflows/server-utils'
import type { WorkflowState } from '@/stores/workflows/workflow/types'
declare global {
var __chatStreamProcessingTasks: Promise<{ success: boolean; error?: any }>[] | undefined
}
import { decryptSecret } from '@/lib/utils'
const logger = createLogger('ChatAuthUtils')
@@ -281,586 +263,3 @@ export async function validateChatAuth(
// Unknown auth type
return { authorized: false, error: 'Unsupported authentication type' }
}
/**
* Executes a workflow for a chat request and returns the formatted output.
*
* When workflows reference <start.input>, they receive the input directly.
* The conversationId is available at <start.conversationId> for maintaining chat context.
*
* @param chatId - Chat deployment identifier
* @param input - User's chat input
* @param conversationId - Optional ID for maintaining conversation context
* @returns Workflow execution result formatted for the chat interface
*/
export async function executeWorkflowForChat(
chatId: string,
input: string,
conversationId?: string
): Promise<any> {
const requestId = generateRequestId()
logger.debug(
`[${requestId}] Executing workflow for chat: ${chatId}${
conversationId ? `, conversationId: ${conversationId}` : ''
}`
)
// Find the chat deployment
const deploymentResult = await db
.select({
id: chat.id,
workflowId: chat.workflowId,
userId: chat.userId,
outputConfigs: chat.outputConfigs,
customizations: chat.customizations,
})
.from(chat)
.where(eq(chat.id, chatId))
.limit(1)
if (deploymentResult.length === 0) {
logger.warn(`[${requestId}] Chat not found: ${chatId}`)
throw new Error('Chat not found')
}
const deployment = deploymentResult[0]
const workflowId = deployment.workflowId
const executionId = uuidv4()
const usageCheck = await checkServerSideUsageLimits(deployment.userId)
if (usageCheck.isExceeded) {
logger.warn(
`[${requestId}] User ${deployment.userId} has exceeded usage limits. Skipping chat execution.`,
{
currentUsage: usageCheck.currentUsage,
limit: usageCheck.limit,
workflowId: deployment.workflowId,
chatId,
}
)
throw new Error(usageCheck.message || CHAT_ERROR_MESSAGES.USAGE_LIMIT_EXCEEDED)
}
// Set up logging for chat execution
const loggingSession = new LoggingSession(workflowId, executionId, 'chat', requestId)
// Check for multi-output configuration in customizations
const customizations = (deployment.customizations || {}) as Record<string, any>
let outputBlockIds: string[] = []
// Extract output configs from the new schema format
let selectedOutputIds: string[] = []
if (deployment.outputConfigs && Array.isArray(deployment.outputConfigs)) {
// Extract output IDs in the format expected by the streaming processor
logger.debug(
`[${requestId}] Found ${deployment.outputConfigs.length} output configs in deployment`
)
selectedOutputIds = deployment.outputConfigs.map((config) => {
const outputId = config.path
? `${config.blockId}_${config.path}`
: `${config.blockId}.content`
logger.debug(
`[${requestId}] Processing output config: blockId=${config.blockId}, path=${config.path || 'content'} -> outputId=${outputId}`
)
return outputId
})
// Also extract block IDs for legacy compatibility
outputBlockIds = deployment.outputConfigs.map((config) => config.blockId)
} else {
// Use customizations as fallback
outputBlockIds = Array.isArray(customizations.outputBlockIds)
? customizations.outputBlockIds
: []
}
// Fall back to customizations if we still have no outputs
if (
outputBlockIds.length === 0 &&
customizations.outputBlockIds &&
customizations.outputBlockIds.length > 0
) {
outputBlockIds = customizations.outputBlockIds
}
logger.debug(
`[${requestId}] Using ${outputBlockIds.length} output blocks and ${selectedOutputIds.length} selected output IDs for extraction`
)
// Find the workflow to check if it's deployed
const workflowResult = await db
.select({
isDeployed: workflow.isDeployed,
variables: workflow.variables,
workspaceId: workflow.workspaceId,
})
.from(workflow)
.where(eq(workflow.id, workflowId))
.limit(1)
if (workflowResult.length === 0 || !workflowResult[0].isDeployed) {
logger.warn(`[${requestId}] Workflow not found or not deployed: ${workflowId}`)
throw new Error('Workflow not available')
}
// Load the active deployed state from the deployment versions table
const { loadDeployedWorkflowState } = await import('@/lib/workflows/db-helpers')
let deployedState: WorkflowState
try {
deployedState = await loadDeployedWorkflowState(workflowId)
} catch (error) {
logger.error(`[${requestId}] Failed to load deployed state for workflow ${workflowId}:`, error)
throw new Error(`Workflow must be deployed to be available for chat`)
}
const { blocks, edges, loops, parallels } = deployedState
// Prepare for execution, similar to use-workflow-execution.ts
const mergedStates = mergeSubblockState(blocks)
const filteredStates = Object.entries(mergedStates).reduce(
(acc, [id, block]) => {
const blockConfig = getBlock(block.type)
const isTriggerBlock = blockConfig?.category === 'triggers'
const isChatTrigger = block.type === 'chat_trigger'
// Keep all non-trigger blocks and also keep the chat_trigger block
if (!isTriggerBlock || isChatTrigger) {
acc[id] = block
}
return acc
},
{} as typeof mergedStates
)
const currentBlockStates = Object.entries(filteredStates).reduce(
(acc, [id, block]) => {
acc[id] = Object.entries(block.subBlocks).reduce(
(subAcc, [key, subBlock]) => {
subAcc[key] = subBlock.value
return subAcc
},
{} as Record<string, any>
)
return acc
},
{} as Record<string, Record<string, any>>
)
// Get user environment variables with workspace precedence
let envVars: Record<string, string> = {}
try {
const workspaceId = workflowResult[0].workspaceId || undefined
const { personalEncrypted, workspaceEncrypted } = await getPersonalAndWorkspaceEnv(
deployment.userId,
workspaceId
)
envVars = { ...personalEncrypted, ...workspaceEncrypted }
} catch (error) {
logger.warn(`[${requestId}] Could not fetch environment variables:`, error)
}
let workflowVariables = {}
try {
if (workflowResult[0].variables) {
workflowVariables =
typeof workflowResult[0].variables === 'string'
? JSON.parse(workflowResult[0].variables)
: workflowResult[0].variables
}
} catch (error) {
logger.warn(`[${requestId}] Could not parse workflow variables:`, error)
}
// Filter edges to exclude connections to/from trigger blocks (same as manual execution)
const triggerBlockIds = Object.keys(mergedStates).filter((id) => {
const type = mergedStates[id].type
const blockConfig = getBlock(type)
// Exclude chat_trigger from the list so its edges are preserved
return blockConfig?.category === 'triggers' && type !== 'chat_trigger'
})
const filteredEdges = edges.filter(
(edge) => !triggerBlockIds.includes(edge.source) && !triggerBlockIds.includes(edge.target)
)
// Create serialized workflow with filtered blocks and edges
const serializedWorkflow = new Serializer().serializeWorkflow(
filteredStates,
filteredEdges,
loops,
parallels,
true // Enable validation during execution
)
// Decrypt environment variables
const decryptedEnvVars: Record<string, string> = {}
for (const [key, encryptedValue] of Object.entries(envVars)) {
try {
const { decrypted } = await decryptSecret(encryptedValue)
decryptedEnvVars[key] = decrypted
} catch (error: any) {
logger.error(`[${requestId}] Failed to decrypt environment variable "${key}"`, error)
// Log but continue - we don't want to break execution if just one var fails
}
}
// Process block states to ensure response formats are properly parsed
const processedBlockStates = Object.entries(currentBlockStates).reduce(
(acc, [blockId, blockState]) => {
// Check if this block has a responseFormat that needs to be parsed
if (blockState.responseFormat && typeof blockState.responseFormat === 'string') {
try {
logger.debug(`[${requestId}] Parsing responseFormat for block ${blockId}`)
// Attempt to parse the responseFormat if it's a string
const parsedResponseFormat = JSON.parse(blockState.responseFormat)
acc[blockId] = {
...blockState,
responseFormat: parsedResponseFormat,
}
} catch (error) {
logger.warn(`[${requestId}] Failed to parse responseFormat for block ${blockId}`, error)
acc[blockId] = blockState
}
} else {
acc[blockId] = blockState
}
return acc
},
{} as Record<string, Record<string, any>>
)
// Start logging session
await loggingSession.safeStart({
userId: deployment.userId,
workspaceId: workflowResult[0].workspaceId || '',
variables: workflowVariables,
})
let sessionCompleted = false
const stream = new ReadableStream({
async start(controller) {
const encoder = new TextEncoder()
try {
const streamedContent = new Map<string, string>()
const streamedBlocks = new Set<string>() // Track which blocks have started streaming
const onStream = async (streamingExecution: any): Promise<void> => {
if (!streamingExecution.stream) return
const blockId = streamingExecution.execution?.blockId
const reader = streamingExecution.stream.getReader()
if (blockId) {
streamedContent.set(blockId, '')
// Add separator if this is not the first block to stream
if (streamedBlocks.size > 0) {
// Send separator before the new block starts
controller.enqueue(
encoder.encode(`data: ${JSON.stringify({ blockId, chunk: '\n\n' })}\n\n`)
)
}
streamedBlocks.add(blockId)
}
try {
while (true) {
const { done, value } = await reader.read()
if (done) {
controller.enqueue(
encoder.encode(`data: ${JSON.stringify({ blockId, event: 'end' })}\n\n`)
)
break
}
const chunk = new TextDecoder().decode(value)
if (blockId) {
streamedContent.set(blockId, (streamedContent.get(blockId) || '') + chunk)
}
controller.enqueue(encoder.encode(`data: ${JSON.stringify({ blockId, chunk })}\n\n`))
}
} catch (error) {
logger.error('Error while reading from stream:', error)
controller.error(error)
}
}
// Determine the start block for chat execution BEFORE creating executor
const startBlock = TriggerUtils.findStartBlock(mergedStates, 'chat')
if (!startBlock) {
const errorMessage = CHAT_ERROR_MESSAGES.NO_CHAT_TRIGGER
logger.error(`[${requestId}] ${errorMessage}`)
if (!sessionCompleted) {
await loggingSession.safeCompleteWithError({
endedAt: new Date().toISOString(),
totalDurationMs: 0,
error: { message: errorMessage },
})
sessionCompleted = true
}
// Send error event that the client expects
controller.enqueue(
encoder.encode(
`data: ${JSON.stringify({
event: 'error',
error: CHAT_ERROR_MESSAGES.GENERIC_ERROR,
})}\n\n`
)
)
controller.close()
return
}
const startBlockId = startBlock.blockId
// Create executor AFTER confirming we have a chat trigger
const executor = new Executor({
workflow: serializedWorkflow,
currentBlockStates: processedBlockStates,
envVarValues: decryptedEnvVars,
workflowInput: { input: input, conversationId },
workflowVariables,
contextExtensions: {
stream: true,
selectedOutputIds: selectedOutputIds.length > 0 ? selectedOutputIds : outputBlockIds,
edges: filteredEdges.map((e: any) => ({
source: e.source,
target: e.target,
})),
onStream,
isDeployedContext: true,
},
})
// Set up logging on the executor
loggingSession.setupExecutor(executor)
let result
try {
result = await executor.execute(workflowId, startBlockId)
} catch (error: any) {
logger.error(`[${requestId}] Chat workflow execution failed:`, error)
if (!sessionCompleted) {
await loggingSession.safeCompleteWithError({
endedAt: new Date().toISOString(),
totalDurationMs: 0,
error: { message: error.message || 'Chat workflow execution failed' },
})
sessionCompleted = true
}
// Send error to stream before ending
controller.enqueue(
encoder.encode(
`data: ${JSON.stringify({
event: 'error',
error: error.message || 'Chat workflow execution failed',
})}\n\n`
)
)
controller.close()
return // Don't throw - just return to end the stream gracefully
}
// Handle both ExecutionResult and StreamingExecution types
const executionResult =
result && typeof result === 'object' && 'execution' in result
? (result.execution as ExecutionResult)
: (result as ExecutionResult)
if (executionResult?.logs) {
// Update streamed content and apply tokenization - process regardless of overall success
// This ensures partial successes (some agents succeed, some fail) still return results
// Add newlines between different agent outputs for better readability
const processedOutputs = new Set<string>()
executionResult.logs.forEach((log: BlockLog) => {
if (streamedContent.has(log.blockId)) {
const content = streamedContent.get(log.blockId)
if (log.output && content) {
// Add newline separation between different outputs (but not before the first one)
const separator = processedOutputs.size > 0 ? '\n\n' : ''
log.output.content = separator + content
processedOutputs.add(log.blockId)
}
}
})
// Also process non-streamed outputs from selected blocks (like function blocks)
// This uses the same logic as the chat panel to ensure identical behavior
const nonStreamingLogs = executionResult.logs.filter(
(log: BlockLog) => !streamedContent.has(log.blockId)
)
// Extract the exact same functions used by the chat panel
const extractBlockIdFromOutputId = (outputId: string): string => {
return outputId.includes('_') ? outputId.split('_')[0] : outputId.split('.')[0]
}
const extractPathFromOutputId = (outputId: string, blockId: string): string => {
return outputId.substring(blockId.length + 1)
}
const parseOutputContentSafely = (output: any): any => {
if (!output?.content) {
return output
}
if (typeof output.content === 'string') {
try {
return JSON.parse(output.content)
} catch (e) {
// Fallback to original structure if parsing fails
return output
}
}
return output
}
// Filter outputs that have matching logs (exactly like chat panel)
const outputsToRender = selectedOutputIds.filter((outputId) => {
const blockIdForOutput = extractBlockIdFromOutputId(outputId)
return nonStreamingLogs.some((log) => log.blockId === blockIdForOutput)
})
// Process each selected output (exactly like chat panel)
for (const outputId of outputsToRender) {
const blockIdForOutput = extractBlockIdFromOutputId(outputId)
const path = extractPathFromOutputId(outputId, blockIdForOutput)
const log = nonStreamingLogs.find((l) => l.blockId === blockIdForOutput)
if (log) {
let outputValue: any = log.output
if (path) {
// Parse JSON content safely (exactly like chat panel)
outputValue = parseOutputContentSafely(outputValue)
const pathParts = path.split('.')
for (const part of pathParts) {
if (outputValue && typeof outputValue === 'object' && part in outputValue) {
outputValue = outputValue[part]
} else {
outputValue = undefined
break
}
}
}
if (outputValue !== undefined) {
// Add newline separation between different outputs
const separator = processedOutputs.size > 0 ? '\n\n' : ''
// Format the output exactly like the chat panel
const formattedOutput =
typeof outputValue === 'string'
? outputValue
: JSON.stringify(outputValue, null, 2)
// Update the log content
if (!log.output.content) {
log.output.content = separator + formattedOutput
} else {
log.output.content = separator + formattedOutput
}
processedOutputs.add(log.blockId)
}
}
}
// Process all logs for streaming tokenization
const processedCount = processStreamingBlockLogs(executionResult.logs, streamedContent)
logger.info(`Processed ${processedCount} blocks for streaming tokenization`)
const { traceSpans, totalDuration } = buildTraceSpans(executionResult)
const enrichedResult = { ...executionResult, traceSpans, totalDuration }
if (conversationId) {
if (!enrichedResult.metadata) {
enrichedResult.metadata = {
duration: totalDuration,
startTime: new Date().toISOString(),
}
}
;(enrichedResult.metadata as any).conversationId = conversationId
}
// Use the executionId created at the beginning of this function
logger.debug(`Using execution ID for deployed chat: ${executionId}`)
if (executionResult.success) {
try {
await db
.update(userStats)
.set({
totalChatExecutions: sql`total_chat_executions + 1`,
lastActive: new Date(),
})
.where(eq(userStats.userId, deployment.userId))
logger.debug(`Updated user stats for deployed chat: ${deployment.userId}`)
} catch (error) {
logger.error(`Failed to update user stats for deployed chat:`, error)
}
}
}
if (!(result && typeof result === 'object' && 'stream' in result)) {
controller.enqueue(
encoder.encode(`data: ${JSON.stringify({ event: 'final', data: result })}\n\n`)
)
}
if (!sessionCompleted) {
const resultForTracing =
executionResult || ({ success: true, output: {}, logs: [] } as ExecutionResult)
const { traceSpans } = buildTraceSpans(resultForTracing)
await loggingSession.safeComplete({
endedAt: new Date().toISOString(),
totalDurationMs: executionResult?.metadata?.duration || 0,
finalOutput: executionResult?.output || {},
traceSpans,
})
sessionCompleted = true
}
controller.close()
} catch (error: any) {
// Handle any errors that occur in the stream
logger.error(`[${requestId}] Stream error:`, error)
// Send error event to client
const encoder = new TextEncoder()
controller.enqueue(
encoder.encode(
`data: ${JSON.stringify({
event: 'error',
error: error.message || 'An unexpected error occurred',
})}\n\n`
)
)
// Try to complete the logging session with error if not already completed
if (!sessionCompleted && loggingSession) {
await loggingSession.safeCompleteWithError({
endedAt: new Date().toISOString(),
totalDurationMs: 0,
error: { message: error.message || 'Stream processing error' },
})
sessionCompleted = true
}
controller.close()
}
},
})
return stream
}
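A minimal sketch of a client consuming the SSE stream this chat endpoint returns, based on the events emitted by the removed handler above ({ blockId, chunk }, { blockId, event: 'end' }, { event: 'final', data }, { event: 'error', error }). The endpoint path, request body, and whether createStreamingResponse keeps exactly these event shapes are assumptions.

async function consumeChatStream(url: string, input: string, conversationId?: string) {
  const res = await fetch(url, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ input, conversationId }),
  })
  if (!res.body) throw new Error('No response body')

  const reader = res.body.getReader()
  const decoder = new TextDecoder()
  let buffer = ''

  while (true) {
    const { done, value } = await reader.read()
    if (done) break
    buffer += decoder.decode(value, { stream: true })

    // SSE messages are separated by a blank line
    const messages = buffer.split('\n\n')
    buffer = messages.pop() ?? ''

    for (const message of messages) {
      if (!message.startsWith('data: ')) continue
      const event = JSON.parse(message.slice('data: '.length))
      if (event.chunk) {
        process.stdout.write(event.chunk) // streamed text for event.blockId
      } else if (event.event === 'final') {
        console.log('\nfinal result:', event.data)
      } else if (event.event === 'error') {
        console.error('\nstream error:', event.error)
      }
      // { blockId, event: 'end' } marks the end of a single block's stream; ignored here
    }
  }
}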

View File

@@ -234,6 +234,7 @@ describe('Knowledge Base By ID API Route', () => {
{
name: validUpdateData.name,
description: validUpdateData.description,
workspaceId: undefined,
chunkingConfig: undefined,
},
expect.any(String)

View File

@@ -103,6 +103,7 @@ export async function PUT(req: NextRequest, { params }: { params: Promise<{ id:
{
name: validatedData.name,
description: validatedData.description,
workspaceId: validatedData.workspaceId,
chunkingConfig: validatedData.chunkingConfig,
},
requestId

View File

@@ -4,6 +4,7 @@ import { and, eq, lte, not, sql } from 'drizzle-orm'
import { NextResponse } from 'next/server'
import { v4 as uuidv4 } from 'uuid'
import { z } from 'zod'
import { getApiKeyOwnerUserId } from '@/lib/api-key/service'
import { checkServerSideUsageLimits } from '@/lib/billing'
import { getHighestPrioritySubscription } from '@/lib/billing/core/subscription'
import { getPersonalAndWorkspaceEnv } from '@/lib/environment/utils'
@@ -17,7 +18,7 @@ import {
getSubBlockValue,
} from '@/lib/schedules/utils'
import { decryptSecret, generateRequestId } from '@/lib/utils'
import { loadDeployedWorkflowState } from '@/lib/workflows/db-helpers'
import { blockExistsInDeployment, loadDeployedWorkflowState } from '@/lib/workflows/db-helpers'
import { updateWorkflowRunCounts } from '@/lib/workflows/utils'
import { Executor } from '@/executor'
import { Serializer } from '@/serializer'
@@ -106,12 +107,22 @@ export async function GET() {
continue
}
const actorUserId = await getApiKeyOwnerUserId(workflowRecord.pinnedApiKeyId)
if (!actorUserId) {
logger.warn(
`[${requestId}] Skipping schedule ${schedule.id}: pinned API key required to attribute usage.`
)
runningExecutions.delete(schedule.workflowId)
continue
}
// Check rate limits for scheduled execution (checks both personal and org subscriptions)
const userSubscription = await getHighestPrioritySubscription(workflowRecord.userId)
const userSubscription = await getHighestPrioritySubscription(actorUserId)
const rateLimiter = new RateLimiter()
const rateLimitCheck = await rateLimiter.checkRateLimitWithSubscription(
workflowRecord.userId,
actorUserId,
userSubscription,
'schedule',
false // schedules are always sync
@@ -149,7 +160,7 @@ export async function GET() {
continue
}
const usageCheck = await checkServerSideUsageLimits(workflowRecord.userId)
const usageCheck = await checkServerSideUsageLimits(actorUserId)
if (usageCheck.isExceeded) {
logger.warn(
`[${requestId}] User ${workflowRecord.userId} has exceeded usage limits. Skipping scheduled execution.`,
@@ -159,26 +170,19 @@ export async function GET() {
workflowId: schedule.workflowId,
}
)
// Error logging handled by logging session
const retryDelay = 24 * 60 * 60 * 1000 // 24 hour delay for exceeded limits
const nextRetryAt = new Date(now.getTime() + retryDelay)
try {
const deployedData = await loadDeployedWorkflowState(schedule.workflowId)
const nextRunAt = calculateNextRunTime(schedule, deployedData.blocks as any)
await db
.update(workflowSchedule)
.set({
updatedAt: now,
nextRunAt: nextRetryAt,
})
.set({ updatedAt: now, nextRunAt })
.where(eq(workflowSchedule.id, schedule.id))
logger.debug(`[${requestId}] Updated next retry time due to usage limits`)
} catch (updateError) {
logger.error(`[${requestId}] Error updating schedule for usage limits:`, updateError)
} catch (calcErr) {
logger.warn(
`[${requestId}] Unable to calculate nextRunAt while skipping schedule ${schedule.id}`,
calcErr
)
}
runningExecutions.delete(schedule.workflowId)
continue
}
@@ -206,11 +210,25 @@ export async function GET() {
const parallels = deployedData.parallels
logger.info(`[${requestId}] Loaded deployed workflow ${schedule.workflowId}`)
// Validate that the schedule's trigger block exists in the deployed state
if (schedule.blockId) {
const blockExists = await blockExistsInDeployment(
schedule.workflowId,
schedule.blockId
)
if (!blockExists) {
logger.warn(
`[${requestId}] Schedule trigger block ${schedule.blockId} not found in deployed workflow ${schedule.workflowId}. Skipping execution.`
)
return { skip: true, blocks: {} as Record<string, BlockState> }
}
}
const mergedStates = mergeSubblockState(blocks)
// Retrieve environment variables with workspace precedence
const { personalEncrypted, workspaceEncrypted } = await getPersonalAndWorkspaceEnv(
workflowRecord.userId,
actorUserId,
workflowRecord.workspaceId || undefined
)
const variables = EnvVarsSchema.parse({
@@ -355,7 +373,6 @@ export async function GET() {
)
const input = {
workflowId: schedule.workflowId,
_context: {
workflowId: schedule.workflowId,
},
@@ -363,7 +380,7 @@ export async function GET() {
// Start logging with environment variables
await loggingSession.safeStart({
userId: workflowRecord.userId,
userId: actorUserId,
workspaceId: workflowRecord.workspaceId || '',
variables: variables || {},
})
@@ -407,7 +424,7 @@ export async function GET() {
totalScheduledExecutions: sql`total_scheduled_executions + 1`,
lastActive: now,
})
.where(eq(userStats.userId, workflowRecord.userId))
.where(eq(userStats.userId, actorUserId))
logger.debug(`[${requestId}] Updated user stats for scheduled execution`)
} catch (statsError) {
@@ -446,6 +463,7 @@ export async function GET() {
message: `Schedule execution failed before workflow started: ${earlyError.message}`,
stackTrace: earlyError.stack,
},
traceSpans: [],
})
} catch (loggingError) {
logger.error(
@@ -459,6 +477,12 @@ export async function GET() {
}
})()
// Check if execution was skipped (e.g., trigger block not found)
if ('skip' in executionSuccess && executionSuccess.skip) {
runningExecutions.delete(schedule.workflowId)
continue
}
if (executionSuccess.success) {
logger.info(`[${requestId}] Workflow ${schedule.workflowId} executed successfully`)
@@ -565,6 +589,7 @@ export async function GET() {
message: `Schedule execution failed: ${error.message}`,
stackTrace: error.stack,
},
traceSpans: [],
})
} catch (loggingError) {
logger.error(

View File

@@ -106,6 +106,24 @@ describe('Webhook Trigger API Route', () => {
mockExecutionDependencies()
mockTriggerDevSdk()
globalMockData.workflows.push({
id: 'test-workflow-id',
userId: 'test-user-id',
pinnedApiKeyId: 'test-pinned-api-key-id',
})
vi.doMock('@/lib/api-key/service', async () => {
const actual = await vi.importActual('@/lib/api-key/service')
return {
...(actual as Record<string, unknown>),
getApiKeyOwnerUserId: vi
.fn()
.mockImplementation(async (pinnedApiKeyId: string | null | undefined) =>
pinnedApiKeyId ? 'test-user-id' : null
),
}
})
vi.doMock('@/services/queue', () => ({
RateLimiter: vi.fn().mockImplementation(() => ({
checkRateLimit: vi.fn().mockResolvedValue({
@@ -222,6 +240,7 @@ describe('Webhook Trigger API Route', () => {
globalMockData.workflows.push({
id: 'test-workflow-id',
userId: 'test-user-id',
pinnedApiKeyId: 'test-pinned-api-key-id',
})
const req = createMockRequest('POST', { event: 'test', id: 'test-123' })
@@ -250,7 +269,11 @@ describe('Webhook Trigger API Route', () => {
providerConfig: { requireAuth: true, token: 'test-token-123' },
workflowId: 'test-workflow-id',
})
globalMockData.workflows.push({ id: 'test-workflow-id', userId: 'test-user-id' })
globalMockData.workflows.push({
id: 'test-workflow-id',
userId: 'test-user-id',
pinnedApiKeyId: 'test-pinned-api-key-id',
})
const headers = {
'Content-Type': 'application/json',
@@ -281,7 +304,11 @@ describe('Webhook Trigger API Route', () => {
},
workflowId: 'test-workflow-id',
})
globalMockData.workflows.push({ id: 'test-workflow-id', userId: 'test-user-id' })
globalMockData.workflows.push({
id: 'test-workflow-id',
userId: 'test-user-id',
pinnedApiKeyId: 'test-pinned-api-key-id',
})
const headers = {
'Content-Type': 'application/json',
@@ -308,7 +335,11 @@ describe('Webhook Trigger API Route', () => {
providerConfig: { requireAuth: true, token: 'case-test-token' },
workflowId: 'test-workflow-id',
})
globalMockData.workflows.push({ id: 'test-workflow-id', userId: 'test-user-id' })
globalMockData.workflows.push({
id: 'test-workflow-id',
userId: 'test-user-id',
pinnedApiKeyId: 'test-pinned-api-key-id',
})
vi.doMock('@trigger.dev/sdk', () => ({
tasks: {
@@ -354,7 +385,11 @@ describe('Webhook Trigger API Route', () => {
},
workflowId: 'test-workflow-id',
})
globalMockData.workflows.push({ id: 'test-workflow-id', userId: 'test-user-id' })
globalMockData.workflows.push({
id: 'test-workflow-id',
userId: 'test-user-id',
pinnedApiKeyId: 'test-pinned-api-key-id',
})
vi.doMock('@trigger.dev/sdk', () => ({
tasks: {
@@ -391,7 +426,6 @@ describe('Webhook Trigger API Route', () => {
providerConfig: { requireAuth: true, token: 'correct-token' },
workflowId: 'test-workflow-id',
})
globalMockData.workflows.push({ id: 'test-workflow-id', userId: 'test-user-id' })
const headers = {
'Content-Type': 'application/json',
@@ -424,7 +458,6 @@ describe('Webhook Trigger API Route', () => {
},
workflowId: 'test-workflow-id',
})
globalMockData.workflows.push({ id: 'test-workflow-id', userId: 'test-user-id' })
const headers = {
'Content-Type': 'application/json',
@@ -453,7 +486,6 @@ describe('Webhook Trigger API Route', () => {
providerConfig: { requireAuth: true, token: 'required-token' },
workflowId: 'test-workflow-id',
})
globalMockData.workflows.push({ id: 'test-workflow-id', userId: 'test-user-id' })
const req = createMockRequest('POST', { event: 'no.auth.test' })
const params = Promise.resolve({ path: 'test-path' })
@@ -482,7 +514,6 @@ describe('Webhook Trigger API Route', () => {
},
workflowId: 'test-workflow-id',
})
globalMockData.workflows.push({ id: 'test-workflow-id', userId: 'test-user-id' })
const headers = {
'Content-Type': 'application/json',
@@ -515,7 +546,6 @@ describe('Webhook Trigger API Route', () => {
},
workflowId: 'test-workflow-id',
})
globalMockData.workflows.push({ id: 'test-workflow-id', userId: 'test-user-id' })
const headers = {
'Content-Type': 'application/json',

View File

@@ -293,6 +293,13 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{
}
}
// Attribution: this route is UI-only; require session user as actor
const actorUserId: string | null = session?.user?.id ?? null
if (!actorUserId) {
logger.warn(`[${requestId}] Unable to resolve actor user for workflow deployment: ${id}`)
return createErrorResponse('Unable to determine deploying user', 400)
}
await db.transaction(async (tx) => {
const [{ maxVersion }] = await tx
.select({ maxVersion: sql`COALESCE(MAX("version"), 0)` })
@@ -318,7 +325,7 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{
state: currentState,
isActive: true,
createdAt: deployedAt,
createdBy: userId,
createdBy: actorUserId,
})
const updateData: Record<string, unknown> = {

View File

@@ -5,9 +5,11 @@ import { eq, sql } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { v4 as uuidv4 } from 'uuid'
import { z } from 'zod'
import { authenticateApiKeyFromHeader, updateApiKeyLastUsed } from '@/lib/api-key/service'
import { getSession } from '@/lib/auth'
import { checkServerSideUsageLimits } from '@/lib/billing'
import { getHighestPrioritySubscription } from '@/lib/billing/core/subscription'
import { env } from '@/lib/env'
import { getPersonalAndWorkspaceEnv } from '@/lib/environment/utils'
import { createLogger } from '@/lib/logs/console/logger'
import { LoggingSession } from '@/lib/logs/execution/logging-session'
@@ -23,6 +25,7 @@ import {
import { validateWorkflowAccess } from '@/app/api/workflows/middleware'
import { createErrorResponse, createSuccessResponse } from '@/app/api/workflows/utils'
import { Executor } from '@/executor'
import type { ExecutionResult } from '@/executor/types'
import { Serializer } from '@/serializer'
import { RateLimitError, RateLimiter, type TriggerType } from '@/services/queue'
import { mergeSubblockState } from '@/stores/workflows/server-utils'
@@ -32,15 +35,11 @@ const logger = createLogger('WorkflowExecuteAPI')
export const dynamic = 'force-dynamic'
export const runtime = 'nodejs'
// Define the schema for environment variables
const EnvVarsSchema = z.record(z.string())
// Keep track of running executions to prevent duplicate requests
// Use a combination of workflow ID and request ID to allow concurrent executions with different inputs
const runningExecutions = new Set<string>()
// Utility function to filter out logs and workflowConnections from API response
function createFilteredResult(result: any) {
export function createFilteredResult(result: any) {
return {
...result,
logs: undefined,
@@ -53,7 +52,6 @@ function createFilteredResult(result: any) {
}
}
// Custom error class for usage limit exceeded
class UsageLimitError extends Error {
statusCode: number
constructor(message: string, statusCode = 402) {
@@ -62,20 +60,76 @@ class UsageLimitError extends Error {
}
}
async function executeWorkflow(
/**
* Resolves output IDs to the internal blockId_attribute format
* Supports both:
* - User-facing format: blockName.path (e.g., "agent1.content")
* - Internal format: blockId_attribute (e.g., "uuid_content") - used by chat deployments
*/
function resolveOutputIds(
selectedOutputs: string[] | undefined,
blocks: Record<string, any>
): string[] | undefined {
if (!selectedOutputs || selectedOutputs.length === 0) {
return selectedOutputs
}
// UUID regex to detect if it's already in blockId_attribute format
const UUID_REGEX = /^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/i
return selectedOutputs.map((outputId) => {
// If it starts with a UUID, it's already in blockId_attribute format (from chat deployments)
if (UUID_REGEX.test(outputId)) {
return outputId
}
// Otherwise, it's in blockName.path format from the user/API
const dotIndex = outputId.indexOf('.')
if (dotIndex === -1) {
logger.warn(`Invalid output ID format (missing dot): ${outputId}`)
return outputId
}
const blockName = outputId.substring(0, dotIndex)
const path = outputId.substring(dotIndex + 1)
// Find the block by name (case-insensitive, ignoring spaces)
const normalizedBlockName = blockName.toLowerCase().replace(/\s+/g, '')
const block = Object.values(blocks).find((b: any) => {
const normalized = (b.name || '').toLowerCase().replace(/\s+/g, '')
return normalized === normalizedBlockName
})
if (!block) {
logger.warn(`Block not found for name: ${blockName} (from output ID: ${outputId})`)
return outputId
}
const resolvedId = `${block.id}_${path}`
logger.debug(`Resolved output ID: ${outputId} -> ${resolvedId}`)
return resolvedId
})
}
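// Usage sketch for the resolution above (illustrative values only; resolveOutputIds is
// module-internal and the block ID/name here are hypothetical):
//
//   resolveOutputIds(['Agent 1.content'], {
//     '3f2c9b1e-0000-4000-8000-aaaaaaaaaaaa': {
//       id: '3f2c9b1e-0000-4000-8000-aaaaaaaaaaaa',
//       name: 'Agent 1',
//     },
//   })
//   // -> ['3f2c9b1e-0000-4000-8000-aaaaaaaaaaaa_content']  (name matched case- and space-insensitively)
//
//   resolveOutputIds(['3f2c9b1e-0000-4000-8000-aaaaaaaaaaaa_content'], blocks)
//   // -> returned unchanged: already in blockId_attribute form (as sent by chat deployments)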
export async function executeWorkflow(
workflow: any,
requestId: string,
input?: any,
executingUserId?: string
input: any | undefined,
actorUserId: string,
streamConfig?: {
enabled: boolean
selectedOutputs?: string[]
isSecureMode?: boolean // When true, filter out all sensitive data
workflowTriggerType?: 'api' | 'chat' // Which trigger block type to look for (default: 'api')
onStream?: (streamingExec: any) => Promise<void> // Callback for streaming agent responses
onBlockComplete?: (blockId: string, output: any) => Promise<void> // Callback when any block completes
}
): Promise<any> {
const workflowId = workflow.id
const executionId = uuidv4()
// Create a unique execution key combining workflow ID and request ID
// This allows concurrent executions of the same workflow with different inputs
const executionKey = `${workflowId}:${requestId}`
// Skip if this exact execution is already running (prevents duplicate requests)
if (runningExecutions.has(executionKey)) {
logger.warn(`[${requestId}] Execution is already running: ${executionKey}`)
throw new Error('Execution is already running')
@@ -85,8 +139,8 @@ async function executeWorkflow(
// Rate limiting is now handled before entering the sync queue
// Check if the user has exceeded their usage limits
const usageCheck = await checkServerSideUsageLimits(workflow.userId)
// Check if the actor has exceeded their usage limits
const usageCheck = await checkServerSideUsageLimits(actorUserId)
if (usageCheck.isExceeded) {
logger.warn(`[${requestId}] User ${workflow.userId} has exceeded usage limits`, {
currentUsage: usageCheck.currentUsage,
@@ -132,13 +186,13 @@ async function executeWorkflow(
// Load personal (for the executing user) and workspace env (workspace overrides personal)
const { personalEncrypted, workspaceEncrypted } = await getPersonalAndWorkspaceEnv(
executingUserId || workflow.userId,
actorUserId,
workflow.workspaceId || undefined
)
const variables = EnvVarsSchema.parse({ ...personalEncrypted, ...workspaceEncrypted })
await loggingSession.safeStart({
userId: executingUserId || workflow.userId,
userId: actorUserId,
workspaceId: workflow.workspaceId,
variables,
})
@@ -273,15 +327,20 @@ async function executeWorkflow(
true // Enable validation during execution
)
// Determine API trigger start block
// Direct API execution ONLY works with API trigger blocks (or legacy starter in api/run mode)
const startBlock = TriggerUtils.findStartBlock(mergedStates, 'api', false) // isChildWorkflow = false
// Determine trigger start block based on execution type
// - 'chat': For chat deployments (looks for chat_trigger block)
// - 'api': For direct API execution (looks for api_trigger block)
// streamConfig is passed from POST handler when using streaming/chat
const preferredTriggerType = streamConfig?.workflowTriggerType || 'api'
const startBlock = TriggerUtils.findStartBlock(mergedStates, preferredTriggerType, false)
if (!startBlock) {
logger.error(`[${requestId}] No API trigger configured for this workflow`)
throw new Error(
'No API trigger configured for this workflow. Add an API Trigger block or use a Start block in API mode.'
)
const errorMsg =
preferredTriggerType === 'api'
? 'No API trigger block found. Add an API Trigger block to this workflow.'
: 'No chat trigger block found. Add a Chat Trigger block to this workflow.'
logger.error(`[${requestId}] ${errorMsg}`)
throw new Error(errorMsg)
}
const startBlockId = startBlock.blockId
@@ -299,38 +358,50 @@ async function executeWorkflow(
}
}
// Build context extensions
const contextExtensions: any = {
executionId,
workspaceId: workflow.workspaceId,
isDeployedContext: true,
}
// Add streaming configuration if enabled
if (streamConfig?.enabled) {
contextExtensions.stream = true
contextExtensions.selectedOutputs = streamConfig.selectedOutputs || []
contextExtensions.edges = edges.map((e: any) => ({
source: e.source,
target: e.target,
}))
contextExtensions.onStream = streamConfig.onStream
contextExtensions.onBlockComplete = streamConfig.onBlockComplete
}
const executor = new Executor({
workflow: serializedWorkflow,
currentBlockStates: processedBlockStates,
envVarValues: decryptedEnvVars,
workflowInput: processedInput,
workflowVariables,
contextExtensions: {
executionId,
workspaceId: workflow.workspaceId,
isDeployedContext: true,
},
contextExtensions,
})
// Set up logging on the executor
loggingSession.setupExecutor(executor)
const result = await executor.execute(workflowId, startBlockId)
// Check if we got a StreamingExecution result (with stream + execution properties)
// For API routes, we only care about the ExecutionResult part, not the stream
const executionResult = 'stream' in result && 'execution' in result ? result.execution : result
// Execute workflow (will always return ExecutionResult since we don't use onStream)
const result = (await executor.execute(workflowId, startBlockId)) as ExecutionResult
logger.info(`[${requestId}] Workflow execution completed: ${workflowId}`, {
success: executionResult.success,
executionTime: executionResult.metadata?.duration,
success: result.success,
executionTime: result.metadata?.duration,
})
// Build trace spans from execution result (works for both success and failure)
const { traceSpans, totalDuration } = buildTraceSpans(executionResult)
const { traceSpans, totalDuration } = buildTraceSpans(result)
// Update workflow run counts if execution was successful
if (executionResult.success) {
if (result.success) {
await updateWorkflowRunCounts(workflowId)
// Track API call in user stats
@@ -340,20 +411,28 @@ async function executeWorkflow(
totalApiCalls: sql`total_api_calls + 1`,
lastActive: sql`now()`,
})
.where(eq(userStats.userId, workflow.userId))
.where(eq(userStats.userId, actorUserId))
}
await loggingSession.safeComplete({
endedAt: new Date().toISOString(),
totalDurationMs: totalDuration || 0,
finalOutput: executionResult.output || {},
finalOutput: result.output || {},
traceSpans: (traceSpans || []) as any,
})
return executionResult
// For non-streaming, return the execution result
return result
} catch (error: any) {
logger.error(`[${requestId}] Workflow execution failed: ${workflowId}`, error)
const executionResultForError = (error?.executionResult as ExecutionResult | undefined) || {
success: false,
output: {},
logs: [],
}
const { traceSpans } = buildTraceSpans(executionResultForError)
await loggingSession.safeCompleteWithError({
endedAt: new Date().toISOString(),
totalDurationMs: 0,
@@ -361,6 +440,7 @@ async function executeWorkflow(
message: error.message || 'Workflow execution failed',
stackTrace: error.stack,
},
traceSpans,
})
throw error
@@ -396,19 +476,30 @@ export async function GET(request: NextRequest, { params }: { params: Promise<{
// Synchronous execution
try {
// Check rate limits BEFORE entering queue for GET requests
if (triggerType === 'api') {
// Get user subscription (checks both personal and org subscriptions)
const userSubscription = await getHighestPrioritySubscription(validation.workflow.userId)
// Resolve actor user id
let actorUserId: string | null = null
if (triggerType === 'manual') {
actorUserId = session!.user!.id
} else {
const apiKeyHeader = request.headers.get('X-API-Key')
const auth = apiKeyHeader ? await authenticateApiKeyFromHeader(apiKeyHeader) : null
if (!auth?.success || !auth.userId) {
return createErrorResponse('Unauthorized', 401)
}
actorUserId = auth.userId
if (auth.keyId) {
void updateApiKeyLastUsed(auth.keyId).catch(() => {})
}
// Check rate limits BEFORE entering execution for API requests
const userSubscription = await getHighestPrioritySubscription(actorUserId)
const rateLimiter = new RateLimiter()
const rateLimitCheck = await rateLimiter.checkRateLimitWithSubscription(
validation.workflow.userId,
actorUserId,
userSubscription,
triggerType,
false // isAsync = false for sync calls
'api',
false
)
if (!rateLimitCheck.allowed) {
throw new RateLimitError(
`Rate limit exceeded. You have ${rateLimitCheck.remaining} requests remaining. Resets at ${rateLimitCheck.resetAt.toISOString()}`
@@ -420,8 +511,7 @@ export async function GET(request: NextRequest, { params }: { params: Promise<{
validation.workflow,
requestId,
undefined,
// Executing user (manual run): if session present, use that user for fallback
(await getSession())?.user?.id || undefined
actorUserId as string
)
// Check if the workflow execution contains a response block output
@@ -487,40 +577,76 @@ export async function POST(
const executionMode = request.headers.get('X-Execution-Mode')
const isAsync = executionMode === 'async'
// Parse request body
// Parse request body first to check for internal parameters
const body = await request.text()
logger.info(`[${requestId}] ${body ? 'Request body provided' : 'No request body provided'}`)
let input = {}
let parsedBody: any = {}
if (body) {
try {
input = JSON.parse(body)
parsedBody = JSON.parse(body)
} catch (error) {
logger.error(`[${requestId}] Failed to parse request body as JSON`, error)
return createErrorResponse('Invalid JSON in request body', 400)
}
}
logger.info(`[${requestId}] Input passed to workflow:`, input)
logger.info(`[${requestId}] Input passed to workflow:`, parsedBody)
// Get authenticated user and determine trigger type
let authenticatedUserId: string | null = null
let triggerType: TriggerType = 'manual'
const extractExecutionParams = (req: NextRequest, body: any) => {
const internalSecret = req.headers.get('X-Internal-Secret')
const isInternalCall = internalSecret === env.INTERNAL_API_SECRET
const session = await getSession()
if (session?.user?.id) {
authenticatedUserId = session.user.id
triggerType = 'manual' // UI session (not rate limited)
} else {
const apiKeyHeader = request.headers.get('X-API-Key')
if (apiKeyHeader) {
authenticatedUserId = validation.workflow.userId
triggerType = 'api'
return {
isSecureMode: body.isSecureMode !== undefined ? body.isSecureMode : isInternalCall,
streamResponse: req.headers.get('X-Stream-Response') === 'true' || body.stream === true,
selectedOutputs:
body.selectedOutputs ||
(req.headers.get('X-Selected-Outputs')
? JSON.parse(req.headers.get('X-Selected-Outputs')!)
: undefined),
workflowTriggerType:
body.workflowTriggerType || (isInternalCall && body.stream ? 'chat' : 'api'),
input: body.input !== undefined ? body.input : body,
}
}
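Roughly, for an external API caller with no `X-Internal-Secret` header (the body values below are made up), the extracted parameters come out like this:

```ts
// Example JSON body sent to POST /api/workflows/{id}/execute with an X-API-Key header
const exampleBody = {
  stream: true,
  selectedOutputs: ['agent1.content'],
  input: { message: 'Hello' },
}

// extractExecutionParams(request, exampleBody) would return approximately:
// {
//   isSecureMode: false,               // no X-Internal-Secret header on the request
//   streamResponse: true,              // body.stream === true (or X-Stream-Response: true)
//   selectedOutputs: ['agent1.content'],
//   workflowTriggerType: 'api',        // not an internal chat-deployment call
//   input: { message: 'Hello' },       // body.input takes precedence over the raw body
// }
```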
if (!authenticatedUserId) {
return createErrorResponse('Authentication required', 401)
const {
isSecureMode: finalIsSecureMode,
streamResponse,
selectedOutputs,
workflowTriggerType,
input,
} = extractExecutionParams(request as NextRequest, parsedBody)
// Get authenticated user and determine trigger type
let authenticatedUserId: string
let triggerType: TriggerType = 'manual'
// For internal calls (chat deployments), use the workflow owner's ID
if (finalIsSecureMode) {
authenticatedUserId = validation.workflow.userId
triggerType = 'manual' // Chat deployments use manual trigger type (no rate limit)
} else {
const session = await getSession()
const apiKeyHeader = request.headers.get('X-API-Key')
if (session?.user?.id && !apiKeyHeader) {
authenticatedUserId = session.user.id
triggerType = 'manual'
} else if (apiKeyHeader) {
const auth = await authenticateApiKeyFromHeader(apiKeyHeader)
if (!auth.success || !auth.userId) {
return createErrorResponse('Unauthorized', 401)
}
authenticatedUserId = auth.userId
triggerType = 'api'
if (auth.keyId) {
void updateApiKeyLastUsed(auth.keyId).catch(() => {})
}
} else {
return createErrorResponse('Authentication required', 401)
}
}
// Get user subscription (checks both personal and org subscriptions)
@@ -606,13 +732,47 @@ export async function POST(
)
}
// Handle streaming response - wrap execution in SSE stream
if (streamResponse) {
// Load workflow blocks to resolve output IDs from blockName.attribute to blockId_attribute format
const deployedData = await loadDeployedWorkflowState(workflowId)
const resolvedSelectedOutputs = selectedOutputs
? resolveOutputIds(selectedOutputs, deployedData.blocks || {})
: selectedOutputs
// Use shared streaming response creator
const { createStreamingResponse } = await import('@/lib/workflows/streaming')
const { SSE_HEADERS } = await import('@/lib/utils')
const stream = await createStreamingResponse({
requestId,
workflow: validation.workflow,
input,
executingUserId: authenticatedUserId,
streamConfig: {
selectedOutputs: resolvedSelectedOutputs,
isSecureMode: finalIsSecureMode,
workflowTriggerType,
},
createFilteredResult,
})
return new NextResponse(stream, {
status: 200,
headers: SSE_HEADERS,
})
}
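A minimal client-side sketch of consuming that SSE response, following the `data:` / `[DONE]` framing handled by the chat clients further down; the base URL, workflow id, and selected output are placeholders, and partial-line buffering is omitted for brevity:

```ts
const BASE_URL = 'https://your-sim-instance.example.com' // stand-in for NEXT_PUBLIC_APP_URL
const workflowId = 'YOUR_WORKFLOW_ID'
const apiKey = process.env.SIM_API_KEY ?? ''

const res = await fetch(`${BASE_URL}/api/workflows/${workflowId}/execute`, {
  method: 'POST',
  headers: { 'X-API-Key': apiKey, 'Content-Type': 'application/json' },
  body: JSON.stringify({ stream: true, selectedOutputs: ['agent1.content'], input: {} }),
})

const reader = res.body!.getReader()
const decoder = new TextDecoder()
while (true) {
  const { done, value } = await reader.read()
  if (done) break
  for (const line of decoder.decode(value, { stream: true }).split('\n')) {
    if (!line.startsWith('data: ')) continue
    const data = line.slice(6)
    if (data === '[DONE]') continue
    const event = JSON.parse(data)
    if (event.blockId && event.chunk) process.stdout.write(event.chunk) // streamed text chunk
    if (event.event === 'final') console.log('\nfinal:', event.data) // full ExecutionResult
  }
}
```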
// Non-streaming execution
const result = await executeWorkflow(
validation.workflow,
requestId,
input,
authenticatedUserId
authenticatedUserId,
undefined
)
// Non-streaming response
const hasResponseBlock = workflowHasResponseBlock(result)
if (hasResponseBlock) {
return createHttpResponseFromBlock(result)


@@ -44,15 +44,17 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{
variables: {},
})
const { traceSpans } = buildTraceSpans(result)
if (result.success === false) {
const message = result.error || 'Workflow execution failed'
await loggingSession.safeCompleteWithError({
endedAt: new Date().toISOString(),
totalDurationMs: result.metadata?.duration || 0,
error: { message },
traceSpans,
})
} else {
const { traceSpans } = buildTraceSpans(result)
await loggingSession.safeComplete({
endedAt: new Date().toISOString(),
totalDurationMs: result.metadata?.duration || 0,


@@ -1,6 +1,7 @@
import type { NextRequest } from 'next/server'
import { authenticateApiKey } from '@/lib/api-key/auth'
import { authenticateApiKeyFromHeader, updateApiKeyLastUsed } from '@/lib/api-key/service'
import { env } from '@/lib/env'
import { createLogger } from '@/lib/logs/console/logger'
import { getWorkflowById } from '@/lib/workflows/utils'
@@ -37,7 +38,11 @@ export async function validateWorkflowAccess(
}
}
// API key authentication
const internalSecret = request.headers.get('X-Internal-Secret')
if (internalSecret === env.INTERNAL_API_SECRET) {
return { workflow }
}
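A hedged sketch of the kind of internal call this allows (all values are placeholders): a server-side caller that knows the shared secret, such as a chat deployment, can skip API-key auth entirely:

```ts
const BASE_URL = 'https://your-sim-instance.example.com' // placeholder base URL
const workflowId = 'YOUR_WORKFLOW_ID'

await fetch(`${BASE_URL}/api/workflows/${workflowId}/execute`, {
  method: 'POST',
  headers: {
    // must match env.INTERNAL_API_SECRET on the server
    'X-Internal-Secret': process.env.INTERNAL_API_SECRET ?? '',
    'Content-Type': 'application/json',
  },
  body: JSON.stringify({ stream: true, workflowTriggerType: 'chat', input: { message: 'Hi' } }),
})
```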
let apiKeyHeader = null
for (const [key, value] of request.headers.entries()) {
if (key.toLowerCase() === 'x-api-key' && value) {


@@ -4,8 +4,6 @@ import { useRef, useState } from 'react'
import { createLogger } from '@/lib/logs/console/logger'
import type { ChatMessage } from '@/app/chat/components/message/message'
import { CHAT_ERROR_MESSAGES } from '@/app/chat/constants'
// No longer need complex output extraction - backend handles this
import type { ExecutionResult } from '@/executor/types'
const logger = createLogger('UseChatStreaming')
@@ -148,11 +146,16 @@ export function useChatStreaming() {
for (const line of lines) {
if (line.startsWith('data: ')) {
const data = line.substring(6)
if (data === '[DONE]') {
continue
}
try {
const json = JSON.parse(line.substring(6))
const json = JSON.parse(data)
const { blockId, chunk: contentChunk, event: eventType } = json
// Handle error events from the server
if (eventType === 'error' || json.event === 'error') {
const errorMessage = json.error || CHAT_ERROR_MESSAGES.GENERIC_ERROR
setMessages((prev) =>
@@ -172,34 +175,11 @@ export function useChatStreaming() {
}
if (eventType === 'final' && json.data) {
// The backend has already processed and combined all outputs
// We just need to extract the combined content and use it
const result = json.data as ExecutionResult
// Collect all content from logs that have output.content (backend processed)
let combinedContent = ''
if (result.logs) {
const contentParts: string[] = []
// Get content from all logs that have processed content
result.logs.forEach((log) => {
if (log.output?.content && typeof log.output.content === 'string') {
// The backend already includes proper separators, so just collect the content
contentParts.push(log.output.content)
}
})
// Join without additional separators since backend already handles this
combinedContent = contentParts.join('')
}
// Update the existing streaming message with the final combined content
setMessages((prev) =>
prev.map((msg) =>
msg.id === messageId
? {
...msg,
content: combinedContent || accumulatedText, // Use combined content or fallback to streamed
isStreaming: false,
}
: msg
@@ -210,7 +190,6 @@ export function useChatStreaming() {
}
if (blockId && contentChunk) {
// Track that this block has streamed content (like chat panel)
if (!messageIdMap.has(blockId)) {
messageIdMap.set(blockId, messageId)
}


@@ -698,10 +698,6 @@ export function KnowledgeBase({
options={{
knowledgeBaseId: id,
currentWorkspaceId: knowledgeBase?.workspaceId || null,
onWorkspaceChange: () => {
// Refresh the page to reflect the workspace change
window.location.reload()
},
onDeleteKnowledgeBase: () => setShowDeleteDialog(true),
}}
/>


@@ -11,6 +11,7 @@ import {
} from '@/components/ui/dropdown-menu'
import { Tooltip, TooltipContent, TooltipTrigger } from '@/components/ui/tooltip'
import { createLogger } from '@/lib/logs/console/logger'
import { useKnowledgeStore } from '@/stores/knowledge/store'
const logger = createLogger('WorkspaceSelector')
@@ -33,6 +34,7 @@ export function WorkspaceSelector({
onWorkspaceChange,
disabled = false,
}: WorkspaceSelectorProps) {
const { updateKnowledgeBase } = useKnowledgeStore()
const [workspaces, setWorkspaces] = useState<Workspace[]>([])
const [isLoading, setIsLoading] = useState(false)
const [isUpdating, setIsUpdating] = useState(false)
@@ -95,6 +97,11 @@ export function WorkspaceSelector({
if (result.success) {
logger.info(`Knowledge base workspace updated: ${knowledgeBaseId} -> ${workspaceId}`)
// Update the store immediately to reflect the change without page reload
updateKnowledgeBase(knowledgeBaseId, { workspaceId: workspaceId || undefined })
// Notify parent component of the change
onWorkspaceChange?.(workspaceId)
} else {
throw new Error(result.error || 'Failed to update workspace')


@@ -1,7 +1,7 @@
'use client'
import { useState } from 'react'
import { Eye, Maximize2, Minimize2, X } from 'lucide-react'
import { Maximize2, Minimize2, X } from 'lucide-react'
import { Badge } from '@/components/ui/badge'
import { Button } from '@/components/ui/button'
import { Dialog, DialogContent, DialogHeader, DialogTitle } from '@/components/ui/dialog'
@@ -45,7 +45,6 @@ export function FrozenCanvasModal({
{/* Header */}
<DialogHeader className='flex flex-row items-center justify-between border-b bg-background p-4'>
<div className='flex items-center gap-3'>
<Eye className='h-5 w-5 text-blue-500 dark:text-blue-400' />
<div>
<DialogTitle className='font-semibold text-foreground text-lg'>
Logged Workflow State
@@ -83,14 +82,15 @@ export function FrozenCanvasModal({
traceSpans={traceSpans}
height='100%'
width='100%'
// Ensure preview leaves padding at edges so nodes don't touch header
/>
</div>
{/* Footer with instructions */}
<div className='border-t bg-background px-6 py-3'>
<div className='text-muted-foreground text-sm'>
💡 Click on blocks to see their input and output data at execution time. This canvas
shows the exact state of the workflow when this execution was captured.
Click on blocks to see their input and output data at execution time. This canvas shows
the exact state of the workflow when this execution was captured.
</div>
</div>
</DialogContent>


@@ -582,6 +582,8 @@ export function FrozenCanvas({
workflowState={data.workflowState}
showSubBlocks={true}
isPannable={true}
defaultZoom={0.8}
fitPadding={0.25}
onNodeClick={(blockId) => {
// Always allow clicking blocks, even if they don't have execution data
// This is important for failed workflows where some blocks never executed


@@ -13,6 +13,67 @@ import {
import { cn, redactApiKeys } from '@/lib/utils'
import type { TraceSpan } from '@/stores/logs/filters/types'
function getSpanKey(span: TraceSpan): string {
if (span.id) {
return span.id
}
const name = span.name || 'span'
const start = span.startTime || 'unknown-start'
const end = span.endTime || 'unknown-end'
return `${name}|${start}|${end}`
}
function mergeTraceSpanChildren(...groups: TraceSpan[][]): TraceSpan[] {
const merged: TraceSpan[] = []
const seen = new Set<string>()
groups.forEach((group) => {
group.forEach((child) => {
const key = getSpanKey(child)
if (seen.has(key)) {
return
}
seen.add(key)
merged.push(child)
})
})
return merged
}
function normalizeChildWorkflowSpan(span: TraceSpan): TraceSpan {
const enrichedSpan: TraceSpan = { ...span }
if (enrichedSpan.output && typeof enrichedSpan.output === 'object') {
enrichedSpan.output = { ...enrichedSpan.output }
}
const normalizedChildren = Array.isArray(span.children)
? span.children.map((childSpan) => normalizeChildWorkflowSpan(childSpan))
: []
const outputChildSpans = Array.isArray(span.output?.childTraceSpans)
? (span.output!.childTraceSpans as TraceSpan[]).map((childSpan) =>
normalizeChildWorkflowSpan(childSpan)
)
: []
const mergedChildren = mergeTraceSpanChildren(normalizedChildren, outputChildSpans)
if (enrichedSpan.output && 'childTraceSpans' in enrichedSpan.output) {
const { childTraceSpans, ...cleanOutput } = enrichedSpan.output as {
childTraceSpans?: TraceSpan[]
} & Record<string, unknown>
enrichedSpan.output = cleanOutput
}
enrichedSpan.children = mergedChildren.length > 0 ? mergedChildren : undefined
return enrichedSpan
}
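A short sketch of what the normalization does to a child-workflow span whose nested spans arrived under `output.childTraceSpans`; the ids, names, and timestamps are invented, and only the fields relevant here are shown:

```ts
const childWorkflowSpan = {
  id: 'span-parent',
  name: 'child workflow',
  startTime: '2025-10-07T00:00:00.000Z',
  endTime: '2025-10-07T00:00:01.000Z',
  output: {
    result: 'ok',
    childTraceSpans: [
      {
        id: 'span-agent',
        name: 'agent',
        startTime: '2025-10-07T00:00:00.100Z',
        endTime: '2025-10-07T00:00:00.900Z',
      },
    ],
  },
} as unknown as TraceSpan

const normalized = normalizeChildWorkflowSpan(childWorkflowSpan)
// normalized.children -> [{ id: 'span-agent', ... }]  (merged and de-duplicated by span key)
// normalized.output   -> { result: 'ok' }             (childTraceSpans stripped from output)
```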
interface TraceSpansDisplayProps {
traceSpans?: TraceSpan[]
totalDuration?: number
@@ -310,22 +371,23 @@ export function TraceSpansDisplay({
</div>
<div className='w-full overflow-hidden rounded-md border shadow-sm'>
{traceSpans.map((span, index) => {
const normalizedSpan = normalizeChildWorkflowSpan(span)
const hasSubItems = Boolean(
(span.children && span.children.length > 0) ||
(span.toolCalls && span.toolCalls.length > 0) ||
span.input ||
span.output
(normalizedSpan.children && normalizedSpan.children.length > 0) ||
(normalizedSpan.toolCalls && normalizedSpan.toolCalls.length > 0) ||
normalizedSpan.input ||
normalizedSpan.output
)
return (
<TraceSpanItem
key={index}
span={span}
span={normalizedSpan}
depth={0}
totalDuration={
actualTotalDuration !== undefined ? actualTotalDuration : totalDuration
}
isLast={index === traceSpans.length - 1}
parentStartTime={new Date(span.startTime).getTime()}
parentStartTime={new Date(normalizedSpan.startTime).getTime()}
workflowStartTime={workflowStartTime}
onToggle={handleSpanToggle}
expandedSpans={expandedSpans}
@@ -612,17 +674,19 @@ function TraceSpanItem({
{hasChildren && (
<div>
{span.children?.map((childSpan, index) => {
const enrichedChildSpan = normalizeChildWorkflowSpan(childSpan)
const childHasSubItems = Boolean(
(childSpan.children && childSpan.children.length > 0) ||
(childSpan.toolCalls && childSpan.toolCalls.length > 0) ||
childSpan.input ||
childSpan.output
(enrichedChildSpan.children && enrichedChildSpan.children.length > 0) ||
(enrichedChildSpan.toolCalls && enrichedChildSpan.toolCalls.length > 0) ||
enrichedChildSpan.input ||
enrichedChildSpan.output
)
return (
<TraceSpanItem
key={index}
span={childSpan}
span={enrichedChildSpan}
depth={depth + 1}
totalDuration={totalDuration}
isLast={index === (span.children?.length || 0) - 1}


@@ -413,7 +413,7 @@ export function DeployForm({
setKeyType('personal')
if (createError) setCreateError(null)
}}
className='h-8'
className='h-8 data-[variant=outline]:border-border data-[variant=outline]:bg-background data-[variant=outline]:text-foreground data-[variant=outline]:hover:bg-muted dark:data-[variant=outline]:border-border dark:data-[variant=outline]:bg-background dark:data-[variant=outline]:text-foreground dark:data-[variant=outline]:hover:bg-muted/80'
>
Personal
</Button>
@@ -425,7 +425,7 @@ export function DeployForm({
setKeyType('workspace')
if (createError) setCreateError(null)
}}
className='h-8'
className='h-8 data-[variant=outline]:border-border data-[variant=outline]:bg-background data-[variant=outline]:text-foreground data-[variant=outline]:hover:bg-muted dark:data-[variant=outline]:border-border dark:data-[variant=outline]:bg-background dark:data-[variant=outline]:text-foreground dark:data-[variant=outline]:hover:bg-muted/80'
>
Workspace
</Button>
@@ -452,7 +452,7 @@ export function DeployForm({
<AlertDialogFooter className='flex'>
<AlertDialogCancel
className='h-9 w-full rounded-[8px]'
className='h-9 w-full rounded-[8px] border-border bg-background text-foreground hover:bg-muted dark:border-border dark:bg-background dark:text-foreground dark:hover:bg-muted/80'
onClick={() => {
setNewKeyName('')
setKeyType('personal')


@@ -12,16 +12,20 @@ import {
} from '@/components/ui/dropdown-menu'
import { Label } from '@/components/ui/label'
import { getEnv, isTruthy } from '@/lib/env'
import { OutputSelect } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/chat/components/output-select/output-select'
interface ExampleCommandProps {
command: string
apiKey: string
endpoint: string
showLabel?: boolean
getInputFormatExample?: () => string
getInputFormatExample?: (includeStreaming?: boolean) => string
workflowId: string | null
selectedStreamingOutputs: string[]
onSelectedStreamingOutputsChange: (outputs: string[]) => void
}
type ExampleMode = 'sync' | 'async'
type ExampleMode = 'sync' | 'async' | 'stream'
type ExampleType = 'execute' | 'status' | 'rate-limits'
export function ExampleCommand({
@@ -30,29 +34,27 @@ export function ExampleCommand({
endpoint,
showLabel = true,
getInputFormatExample,
workflowId,
selectedStreamingOutputs,
onSelectedStreamingOutputsChange,
}: ExampleCommandProps) {
const [mode, setMode] = useState<ExampleMode>('sync')
const [exampleType, setExampleType] = useState<ExampleType>('execute')
const isAsyncEnabled = isTruthy(getEnv('NEXT_PUBLIC_TRIGGER_DEV_ENABLED'))
// Format the curl command to use a placeholder for the API key
const formatCurlCommand = (command: string, apiKey: string) => {
if (!command.includes('curl')) return command
// Replace the actual API key with a placeholder in the command
const sanitizedCommand = command.replace(apiKey, '$SIM_API_KEY')
// Format the command with line breaks for better readability
return sanitizedCommand
.replace(' -H ', '\n -H ')
.replace(' -d ', '\n -d ')
.replace(' http', '\n http')
}
// Get the command with placeholder for copying (single line, no line breaks)
const getActualCommand = () => {
const displayCommand = getDisplayCommand()
// Remove line breaks and extra whitespace for copying
return displayCommand
.replace(/\\\n\s*/g, ' ') // Remove backslash + newline + whitespace
.replace(/\n\s*/g, ' ') // Remove any remaining newlines + whitespace
@@ -63,32 +65,56 @@ export function ExampleCommand({
const getDisplayCommand = () => {
const baseEndpoint = endpoint.replace(apiKey, '$SIM_API_KEY')
const inputExample = getInputFormatExample
? getInputFormatExample()
? getInputFormatExample(false)
: ' -d \'{"input": "your data here"}\''
const addStreamingParams = (dashD: string) => {
const match = dashD.match(/-d\s*'([\s\S]*)'/)
if (!match) {
const payload: Record<string, any> = { stream: true }
if (selectedStreamingOutputs && selectedStreamingOutputs.length > 0) {
payload.selectedOutputs = selectedStreamingOutputs
}
return ` -d '${JSON.stringify(payload)}'`
}
try {
const payload = JSON.parse(match[1]) as Record<string, any>
payload.stream = true
if (selectedStreamingOutputs && selectedStreamingOutputs.length > 0) {
payload.selectedOutputs = selectedStreamingOutputs
}
return ` -d '${JSON.stringify(payload)}'`
} catch {
return dashD
}
}
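For illustration, assuming `selectedStreamingOutputs` is `['agent1.content']`, the helper rewrites the curl `-d` fragment roughly like this:

```ts
addStreamingParams(` -d '{"message":"Hello"}'`)
// -> ` -d '{"message":"Hello","stream":true,"selectedOutputs":["agent1.content"]}'`

addStreamingParams('') // nothing to merge into, so a minimal streaming payload is built
// -> ` -d '{"stream":true,"selectedOutputs":["agent1.content"]}'`
```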
switch (mode) {
case 'sync':
if (getInputFormatExample) {
const syncInputExample = getInputFormatExample(false)
return `curl -X POST \\\n -H "X-API-Key: $SIM_API_KEY" \\\n -H "Content-Type: application/json"${syncInputExample} \\\n ${baseEndpoint}`
}
return formatCurlCommand(command, apiKey)
case 'stream': {
const streamDashD = addStreamingParams(inputExample)
return `curl -X POST \\\n -H "X-API-Key: $SIM_API_KEY" \\\n -H "Content-Type: application/json"${streamDashD} \\\n ${baseEndpoint}`
}
case 'async':
switch (exampleType) {
case 'execute':
return `curl -X POST \\
-H "X-API-Key: $SIM_API_KEY" \\
-H "Content-Type: application/json" \\
-H "X-Execution-Mode: async"${inputExample} \\
${baseEndpoint}`
return `curl -X POST \\\n -H "X-API-Key: $SIM_API_KEY" \\\n -H "Content-Type: application/json" \\\n -H "X-Execution-Mode: async"${inputExample} \\\n ${baseEndpoint}`
case 'status': {
const baseUrl = baseEndpoint.split('/api/workflows/')[0]
return `curl -H "X-API-Key: $SIM_API_KEY" \\
${baseUrl}/api/jobs/JOB_ID_FROM_EXECUTION`
return `curl -H "X-API-Key: $SIM_API_KEY" \\\n ${baseUrl}/api/jobs/JOB_ID_FROM_EXECUTION`
}
case 'rate-limits': {
const baseUrlForRateLimit = baseEndpoint.split('/api/workflows/')[0]
return `curl -H "X-API-Key: $SIM_API_KEY" \\
${baseUrlForRateLimit}/api/users/me/usage-limits`
return `curl -H "X-API-Key: $SIM_API_KEY" \\\n ${baseUrlForRateLimit}/api/users/me/usage-limits`
}
default:
@@ -114,10 +140,11 @@ export function ExampleCommand({
}
return (
<div className='space-y-1.5'>
<div className='flex items-center justify-between'>
{showLabel && <Label className='font-medium text-sm'>Example</Label>}
{isAsyncEnabled && (
<div className='space-y-4'>
{/* Example Command */}
<div className='space-y-1.5'>
<div className='flex items-center justify-between'>
{showLabel && <Label className='font-medium text-sm'>Example</Label>}
<div className='flex items-center gap-1'>
<Button
variant='outline'
@@ -134,57 +161,85 @@ export function ExampleCommand({
<Button
variant='outline'
size='sm'
onClick={() => setMode('async')}
onClick={() => setMode('stream')}
className={`h-6 min-w-[50px] px-2 py-1 text-xs transition-none ${
mode === 'async'
mode === 'stream'
? 'border-primary bg-primary text-primary-foreground hover:border-primary hover:bg-primary hover:text-primary-foreground'
: ''
}`}
>
Async
Stream
</Button>
<DropdownMenu>
<DropdownMenuTrigger asChild>
{isAsyncEnabled && (
<>
<Button
variant='outline'
size='sm'
className='h-6 min-w-[140px] justify-between px-2 py-1 text-xs'
disabled={mode === 'sync'}
onClick={() => setMode('async')}
className={`h-6 min-w-[50px] px-2 py-1 text-xs transition-none ${
mode === 'async'
? 'border-primary bg-primary text-primary-foreground hover:border-primary hover:bg-primary hover:text-primary-foreground'
: ''
}`}
>
<span className='truncate'>{getExampleTitle()}</span>
<ChevronDown className='ml-1 h-3 w-3 flex-shrink-0' />
Async
</Button>
</DropdownMenuTrigger>
<DropdownMenuContent align='end'>
<DropdownMenuItem
className='cursor-pointer'
onClick={() => setExampleType('execute')}
>
Async Execution
</DropdownMenuItem>
<DropdownMenuItem
className='cursor-pointer'
onClick={() => setExampleType('status')}
>
Check Job Status
</DropdownMenuItem>
<DropdownMenuItem
className='cursor-pointer'
onClick={() => setExampleType('rate-limits')}
>
Rate Limits & Usage
</DropdownMenuItem>
</DropdownMenuContent>
</DropdownMenu>
<DropdownMenu>
<DropdownMenuTrigger asChild>
<Button
variant='outline'
size='sm'
className='h-6 min-w-[140px] justify-between px-2 py-1 text-xs'
disabled={mode === 'sync' || mode === 'stream'}
>
<span className='truncate'>{getExampleTitle()}</span>
<ChevronDown className='ml-1 h-3 w-3 flex-shrink-0' />
</Button>
</DropdownMenuTrigger>
<DropdownMenuContent align='end'>
<DropdownMenuItem
className='cursor-pointer'
onClick={() => setExampleType('execute')}
>
Async Execution
</DropdownMenuItem>
<DropdownMenuItem
className='cursor-pointer'
onClick={() => setExampleType('status')}
>
Check Job Status
</DropdownMenuItem>
<DropdownMenuItem
className='cursor-pointer'
onClick={() => setExampleType('rate-limits')}
>
Rate Limits & Usage
</DropdownMenuItem>
</DropdownMenuContent>
</DropdownMenu>
</>
)}
</div>
</div>
{/* Output selector for Stream mode */}
{mode === 'stream' && (
<div className='space-y-2'>
<div className='text-muted-foreground text-xs'>Select outputs to stream</div>
<OutputSelect
workflowId={workflowId}
selectedOutputs={selectedStreamingOutputs}
onOutputSelect={onSelectedStreamingOutputsChange}
placeholder='Select outputs for streaming'
valueMode='label'
/>
</div>
)}
</div>
<div className='group relative h-[120px] rounded-md border bg-background transition-colors hover:bg-muted/50'>
<pre className='h-full overflow-auto whitespace-pre-wrap p-3 font-mono text-xs'>
{getDisplayCommand()}
</pre>
<CopyButton text={getActualCommand()} />
<div className='group relative rounded-md border bg-background transition-colors hover:bg-muted/50'>
<pre className='whitespace-pre-wrap p-3 font-mono text-xs'>{getDisplayCommand()}</pre>
<CopyButton text={getActualCommand()} />
</div>
</div>
</div>
)


@@ -43,7 +43,9 @@ interface DeploymentInfoProps {
workflowId: string | null
deployedState: WorkflowState
isLoadingDeployedState: boolean
getInputFormatExample?: () => string
getInputFormatExample?: (includeStreaming?: boolean) => string
selectedStreamingOutputs: string[]
onSelectedStreamingOutputsChange: (outputs: string[]) => void
}
export function DeploymentInfo({
@@ -57,6 +59,8 @@ export function DeploymentInfo({
deployedState,
isLoadingDeployedState,
getInputFormatExample,
selectedStreamingOutputs,
onSelectedStreamingOutputsChange,
}: DeploymentInfoProps) {
const [isViewingDeployed, setIsViewingDeployed] = useState(false)
@@ -116,6 +120,9 @@ export function DeploymentInfo({
apiKey={deploymentInfo.apiKey}
endpoint={deploymentInfo.endpoint}
getInputFormatExample={getInputFormatExample}
workflowId={workflowId}
selectedStreamingOutputs={selectedStreamingOutputs}
onSelectedStreamingOutputsChange={onSelectedStreamingOutputsChange}
/>
</div>


@@ -64,7 +64,7 @@ interface DeployFormValues {
newKeyName?: string
}
type TabView = 'general' | 'api' | 'chat'
type TabView = 'general' | 'api' | 'versions' | 'chat'
export function DeployModal({
open,
@@ -92,6 +92,7 @@ export function DeployModal({
const [apiDeployError, setApiDeployError] = useState<string | null>(null)
const [chatExists, setChatExists] = useState(false)
const [isChatFormValid, setIsChatFormValid] = useState(false)
const [selectedStreamingOutputs, setSelectedStreamingOutputs] = useState<string[]>([])
const [versions, setVersions] = useState<WorkflowDeploymentVersionResponse[]>([])
const [versionsLoading, setVersionsLoading] = useState(false)
@@ -102,7 +103,7 @@ export function DeployModal({
const [currentPage, setCurrentPage] = useState(1)
const itemsPerPage = 5
const getInputFormatExample = () => {
const getInputFormatExample = (includeStreaming = false) => {
let inputFormatExample = ''
try {
const blocks = Object.values(useWorkflowStore.getState().blocks)
@@ -117,8 +118,9 @@ export function DeployModal({
if (targetBlock) {
const inputFormat = useSubBlockStore.getState().getValue(targetBlock.id, 'inputFormat')
const exampleData: Record<string, any> = {}
if (inputFormat && Array.isArray(inputFormat) && inputFormat.length > 0) {
const exampleData: Record<string, any> = {}
inputFormat.forEach((field: any) => {
if (field.name) {
switch (field.type) {
@@ -140,7 +142,40 @@ export function DeployModal({
}
}
})
}
// Add streaming parameters if enabled and outputs are selected
if (includeStreaming && selectedStreamingOutputs.length > 0) {
exampleData.stream = true
// Convert blockId_attribute format to blockName.attribute format for display
const UUID_REGEX = /^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/i
const convertedOutputs = selectedStreamingOutputs.map((outputId) => {
// If it starts with a UUID, convert to blockName.attribute format
if (UUID_REGEX.test(outputId)) {
const underscoreIndex = outputId.indexOf('_')
if (underscoreIndex === -1) return outputId
const blockId = outputId.substring(0, underscoreIndex)
const attribute = outputId.substring(underscoreIndex + 1)
// Find the block by ID and get its name
const block = blocks.find((b) => b.id === blockId)
if (block?.name) {
// Normalize block name: lowercase and remove spaces
const normalizedBlockName = block.name.toLowerCase().replace(/\s+/g, '')
return `${normalizedBlockName}.${attribute}`
}
}
// Already in blockName.attribute format or couldn't convert
return outputId
})
exampleData.selectedOutputs = convertedOutputs
}
if (Object.keys(exampleData).length > 0) {
inputFormatExample = ` -d '${JSON.stringify(exampleData)}'`
}
}
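A hedged illustration of the display conversion, with an invented block id and name:

```ts
// Stored selection from the streaming output selector (internal blockId_attribute form):
//   '3f2b1c04-9d2e-4a7b-8c1d-0e5f6a7b8c9d_content'
// With a workflow block { id: '3f2b1c04-9d2e-4a7b-8c1d-0e5f6a7b8c9d', name: 'Agent 1' },
// the generated curl example payload uses the user-facing form instead:
//   -d '{"stream":true,"selectedOutputs":["agent1.content"]}'
// If no matching block is found, the raw id is left as-is in the example payload.
```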
@@ -199,7 +234,7 @@ export function DeployModal({
setIsLoading(true)
fetchApiKeys()
fetchChatDeploymentInfo()
setActiveTab('general')
setActiveTab('api')
}
}, [open, workflowId])
@@ -231,7 +266,7 @@ export function DeployModal({
const data = await response.json()
const endpoint = `${getEnv('NEXT_PUBLIC_APP_URL')}/api/workflows/${workflowId}/execute`
const inputFormatExample = getInputFormatExample()
const inputFormatExample = getInputFormatExample(selectedStreamingOutputs.length > 0) // Include streaming params only if outputs selected
setDeploymentInfo({
isDeployed: data.isDeployed,
@@ -287,7 +322,7 @@ export function DeployModal({
useWorkflowRegistry.getState().setWorkflowNeedsRedeployment(workflowId, false)
}
const endpoint = `${getEnv('NEXT_PUBLIC_APP_URL')}/api/workflows/${workflowId}/execute`
const inputFormatExample = getInputFormatExample()
const inputFormatExample = getInputFormatExample(selectedStreamingOutputs.length > 0) // Include streaming params only if outputs selected
const newDeploymentInfo = {
isDeployed: true,
@@ -494,7 +529,7 @@ export function DeployModal({
return (
<Dialog open={open} onOpenChange={handleCloseModal}>
<DialogContent
className='flex max-h-[78vh] flex-col gap-0 overflow-hidden p-0 sm:max-w-[600px]'
className='flex max-h-[90vh] flex-col gap-0 overflow-hidden p-0 sm:max-w-[600px]'
hideCloseButton
>
<DialogHeader className='flex-shrink-0 border-b px-6 py-4'>
@@ -510,16 +545,6 @@ export function DeployModal({
<div className='flex flex-1 flex-col overflow-hidden'>
<div className='flex h-14 flex-none items-center border-b px-6'>
<div className='flex gap-2'>
<button
onClick={() => setActiveTab('general')}
className={`rounded-md px-3 py-1 text-sm transition-colors ${
activeTab === 'general'
? 'bg-accent text-foreground'
: 'text-muted-foreground hover:bg-accent/50 hover:text-foreground'
}`}
>
General
</button>
<button
onClick={() => setActiveTab('api')}
className={`rounded-md px-3 py-1 text-sm transition-colors ${
@@ -530,6 +555,16 @@ export function DeployModal({
>
API
</button>
<button
onClick={() => setActiveTab('versions')}
className={`rounded-md px-3 py-1 text-sm transition-colors ${
activeTab === 'versions'
? 'bg-accent text-foreground'
: 'text-muted-foreground hover:bg-accent/50 hover:text-foreground'
}`}
>
Versions
</button>
<button
onClick={() => setActiveTab('chat')}
className={`rounded-md px-3 py-1 text-sm transition-colors ${
@@ -545,175 +580,6 @@ export function DeployModal({
<div className='flex-1 overflow-y-auto'>
<div className='p-6'>
{activeTab === 'general' && (
<>
{isDeployed ? (
<DeploymentInfo
isLoading={isLoading}
deploymentInfo={
deploymentInfo ? { ...deploymentInfo, needsRedeployment } : null
}
onRedeploy={handleRedeploy}
onUndeploy={handleUndeploy}
isSubmitting={isSubmitting}
isUndeploying={isUndeploying}
workflowId={workflowId}
deployedState={deployedState}
isLoadingDeployedState={isLoadingDeployedState}
getInputFormatExample={getInputFormatExample}
/>
) : (
<>
{apiDeployError && (
<div className='mb-4 rounded-md border border-destructive/30 bg-destructive/10 p-3 text-destructive text-sm'>
<div className='font-semibold'>API Deployment Error</div>
<div>{apiDeployError}</div>
</div>
)}
<div className='-mx-1 px-1'>
<DeployForm
apiKeys={apiKeys}
keysLoaded={keysLoaded}
onSubmit={onDeploy}
onApiKeyCreated={fetchApiKeys}
formId='deploy-api-form-general'
/>
</div>
</>
)}
<div className='mt-6'>
<div className='mb-3 font-medium text-sm'>Deployment Versions</div>
{versionsLoading ? (
<div className='rounded-md border p-4 text-center text-muted-foreground text-sm'>
Loading deployments...
</div>
) : versions.length === 0 ? (
<div className='rounded-md border p-4 text-center text-muted-foreground text-sm'>
No deployments yet
</div>
) : (
<>
<div className='overflow-hidden rounded-md border'>
<table className='w-full'>
<thead className='border-b bg-muted/50'>
<tr>
<th className='w-10' />
<th className='px-4 py-2 text-left font-medium text-muted-foreground text-xs'>
Version
</th>
<th className='px-4 py-2 text-left font-medium text-muted-foreground text-xs'>
Deployed By
</th>
<th className='px-4 py-2 text-left font-medium text-muted-foreground text-xs'>
Created
</th>
<th className='w-10' />
</tr>
</thead>
<tbody className='divide-y'>
{versions
.slice((currentPage - 1) * itemsPerPage, currentPage * itemsPerPage)
.map((v) => (
<tr
key={v.id}
className='cursor-pointer transition-colors hover:bg-muted/30'
onClick={() => openVersionPreview(v.version)}
>
<td className='px-4 py-2.5'>
<div
className={`h-2 w-2 rounded-full ${
v.isActive ? 'bg-green-500' : 'bg-muted-foreground/40'
}`}
title={v.isActive ? 'Active' : 'Inactive'}
/>
</td>
<td className='px-4 py-2.5'>
<span className='font-medium text-sm'>v{v.version}</span>
</td>
<td className='px-4 py-2.5'>
<span className='text-muted-foreground text-sm'>
{v.deployedBy || 'Unknown'}
</span>
</td>
<td className='px-4 py-2.5'>
<span className='text-muted-foreground text-sm'>
{new Date(v.createdAt).toLocaleDateString()}{' '}
{new Date(v.createdAt).toLocaleTimeString()}
</span>
</td>
<td
className='px-4 py-2.5'
onClick={(e) => e.stopPropagation()}
>
<DropdownMenu>
<DropdownMenuTrigger asChild>
<Button
variant='ghost'
size='icon'
className='h-8 w-8'
disabled={activatingVersion === v.version}
>
<MoreVertical className='h-4 w-4' />
</Button>
</DropdownMenuTrigger>
<DropdownMenuContent align='end'>
<DropdownMenuItem
onClick={() => activateVersion(v.version)}
disabled={v.isActive || activatingVersion === v.version}
>
{v.isActive
? 'Active'
: activatingVersion === v.version
? 'Activating...'
: 'Activate'}
</DropdownMenuItem>
<DropdownMenuItem
onClick={() => openVersionPreview(v.version)}
>
Inspect
</DropdownMenuItem>
</DropdownMenuContent>
</DropdownMenu>
</td>
</tr>
))}
</tbody>
</table>
</div>
{versions.length > itemsPerPage && (
<div className='mt-3 flex items-center justify-between'>
<span className='text-muted-foreground text-sm'>
Showing{' '}
{Math.min((currentPage - 1) * itemsPerPage + 1, versions.length)} -{' '}
{Math.min(currentPage * itemsPerPage, versions.length)} of{' '}
{versions.length}
</span>
<div className='flex gap-2'>
<Button
variant='outline'
size='sm'
onClick={() => setCurrentPage(currentPage - 1)}
disabled={currentPage === 1}
>
Previous
</Button>
<Button
variant='outline'
size='sm'
onClick={() => setCurrentPage(currentPage + 1)}
disabled={currentPage * itemsPerPage >= versions.length}
>
Next
</Button>
</div>
</div>
)}
</>
)}
</div>
</>
)}
{activeTab === 'api' && (
<>
{isDeployed ? (
@@ -730,6 +596,8 @@ export function DeployModal({
deployedState={deployedState}
isLoadingDeployedState={isLoadingDeployedState}
getInputFormatExample={getInputFormatExample}
selectedStreamingOutputs={selectedStreamingOutputs}
onSelectedStreamingOutputsChange={setSelectedStreamingOutputs}
/>
) : (
<>
@@ -739,6 +607,7 @@ export function DeployModal({
<div>{apiDeployError}</div>
</div>
)}
<div className='-mx-1 px-1'>
<DeployForm
apiKeys={apiKeys}
@@ -753,6 +622,136 @@ export function DeployModal({
</>
)}
{activeTab === 'versions' && (
<>
<div className='mb-3 font-medium text-sm'>Deployment Versions</div>
{versionsLoading ? (
<div className='rounded-md border p-4 text-center text-muted-foreground text-sm'>
Loading deployments...
</div>
) : versions.length === 0 ? (
<div className='rounded-md border p-4 text-center text-muted-foreground text-sm'>
No deployments yet
</div>
) : (
<>
<div className='overflow-hidden rounded-md border'>
<table className='w-full'>
<thead className='border-b bg-muted/50'>
<tr>
<th className='w-10' />
<th className='px-4 py-2 text-left font-medium text-muted-foreground text-xs'>
Version
</th>
<th className='px-4 py-2 text-left font-medium text-muted-foreground text-xs'>
Deployed By
</th>
<th className='px-4 py-2 text-left font-medium text-muted-foreground text-xs'>
Created
</th>
<th className='w-10' />
</tr>
</thead>
<tbody className='divide-y'>
{versions
.slice((currentPage - 1) * itemsPerPage, currentPage * itemsPerPage)
.map((v) => (
<tr
key={v.id}
className='cursor-pointer transition-colors hover:bg-muted/30'
onClick={() => openVersionPreview(v.version)}
>
<td className='px-4 py-2.5'>
<div
className={`h-2 w-2 rounded-full ${
v.isActive ? 'bg-green-500' : 'bg-muted-foreground/40'
}`}
title={v.isActive ? 'Active' : 'Inactive'}
/>
</td>
<td className='px-4 py-2.5'>
<span className='font-medium text-sm'>v{v.version}</span>
</td>
<td className='px-4 py-2.5'>
<span className='text-muted-foreground text-sm'>
{v.deployedBy || 'Unknown'}
</span>
</td>
<td className='px-4 py-2.5'>
<span className='text-muted-foreground text-sm'>
{new Date(v.createdAt).toLocaleDateString()}{' '}
{new Date(v.createdAt).toLocaleTimeString()}
</span>
</td>
<td className='px-4 py-2.5' onClick={(e) => e.stopPropagation()}>
<DropdownMenu>
<DropdownMenuTrigger asChild>
<Button
variant='ghost'
size='icon'
className='h-8 w-8'
disabled={activatingVersion === v.version}
>
<MoreVertical className='h-4 w-4' />
</Button>
</DropdownMenuTrigger>
<DropdownMenuContent align='end'>
<DropdownMenuItem
onClick={() => activateVersion(v.version)}
disabled={v.isActive || activatingVersion === v.version}
>
{v.isActive
? 'Active'
: activatingVersion === v.version
? 'Activating...'
: 'Activate'}
</DropdownMenuItem>
<DropdownMenuItem
onClick={() => openVersionPreview(v.version)}
>
Inspect
</DropdownMenuItem>
</DropdownMenuContent>
</DropdownMenu>
</td>
</tr>
))}
</tbody>
</table>
</div>
{versions.length > itemsPerPage && (
<div className='mt-3 flex items-center justify-between'>
<span className='text-muted-foreground text-sm'>
Showing{' '}
{Math.min((currentPage - 1) * itemsPerPage + 1, versions.length)} -{' '}
{Math.min(currentPage * itemsPerPage, versions.length)} of{' '}
{versions.length}
</span>
<div className='flex gap-2'>
<Button
variant='outline'
size='sm'
onClick={() => setCurrentPage(currentPage - 1)}
disabled={currentPage === 1}
>
Previous
</Button>
<Button
variant='outline'
size='sm'
onClick={() => setCurrentPage(currentPage + 1)}
disabled={currentPage * itemsPerPage >= versions.length}
>
Next
</Button>
</div>
</div>
)}
</>
)}
</>
)}
{activeTab === 'chat' && (
<ChatDeploy
workflowId={workflowId || ''}
@@ -776,36 +775,6 @@ export function DeployModal({
</div>
</div>
{activeTab === 'general' && !isDeployed && (
<div className='flex flex-shrink-0 justify-between border-t px-6 py-4'>
<Button variant='outline' onClick={handleCloseModal}>
Cancel
</Button>
<Button
type='submit'
form='deploy-api-form-general'
disabled={isSubmitting || (!keysLoaded && !apiKeys.length)}
className={cn(
'gap-2 font-medium',
'bg-[var(--brand-primary-hover-hex)] hover:bg-[var(--brand-primary-hover-hex)]',
'shadow-[0_0_0_0_var(--brand-primary-hover-hex)] hover:shadow-[0_0_0_4px_rgba(127,47,255,0.15)]',
'text-white transition-all duration-200',
'disabled:opacity-50 disabled:hover:bg-[var(--brand-primary-hover-hex)] disabled:hover:shadow-none'
)}
>
{isSubmitting ? (
<>
<Loader2 className='mr-1.5 h-3.5 w-3.5 animate-spin' />
Deploying...
</>
) : (
'Deploy'
)}
</Button>
</div>
)}
{activeTab === 'api' && !isDeployed && (
<div className='flex flex-shrink-0 justify-between border-t px-6 py-4'>
<Button variant='outline' onClick={handleCloseModal}>


@@ -56,7 +56,8 @@ export function DeployedWorkflowCard({
<div className='flex items-center justify-between'>
<h3 className='font-medium'>Workflow Preview</h3>
<div className='flex items-center gap-2'>
{hasCurrent && (
{/* Show Current only when no explicit version is selected */}
{hasCurrent && !hasSelected && (
<button
type='button'
className={cn(
@@ -68,6 +69,7 @@ export function DeployedWorkflowCard({
Current
</button>
)}
{/* Always show Active Deployed */}
{hasActive && (
<button
type='button'
@@ -80,6 +82,7 @@ export function DeployedWorkflowCard({
Active Deployed
</button>
)}
{/* If a specific version is selected, show its label */}
{hasSelected && (
<button
type='button'
@@ -109,7 +112,7 @@ export function DeployedWorkflowCard({
width='100%'
isPannable={true}
defaultPosition={{ x: 0, y: 0 }}
defaultZoom={1}
defaultZoom={0.8}
/>
</div>
</CardContent>


@@ -106,17 +106,22 @@ export function DeployedWorkflowModal({
selectedVersionLabel={selectedVersionLabel}
/>
<div className='mt-6 flex justify-between'>
<div className='mt-1 flex justify-between'>
<div className='flex items-center gap-2'>
{onActivateVersion && (
<Button
onClick={onActivateVersion}
disabled={isSelectedVersionActive || !!isActivating}
variant={isSelectedVersionActive ? 'secondary' : 'default'}
>
{isSelectedVersionActive ? 'Active' : isActivating ? 'Activating…' : 'Activate'}
</Button>
)}
{onActivateVersion &&
(isSelectedVersionActive ? (
<div className='inline-flex items-center gap-2 rounded-md bg-emerald-500/10 px-2.5 py-1 font-medium text-emerald-600 text-xs dark:text-emerald-400'>
<span className='relative flex h-2 w-2 items-center justify-center'>
<span className='absolute inline-flex h-full w-full animate-ping rounded-full bg-emerald-500 opacity-75' />
<span className='relative inline-flex h-2 w-2 rounded-full bg-emerald-500' />
</span>
Active
</div>
) : (
<Button onClick={onActivateVersion} disabled={!!isActivating}>
{isActivating ? 'Activating…' : 'Activate'}
</Button>
))}
</div>
<div className='flex items-center gap-2'>


@@ -305,7 +305,18 @@ export function Chat({ chatMessage, setChatMessage }: ChatProps) {
// Check if we got a streaming response
if (result && 'stream' in result && result.stream instanceof ReadableStream) {
const messageIdMap = new Map<string, string>()
// Create a single message for all outputs (like chat client does)
const responseMessageId = crypto.randomUUID()
let accumulatedContent = ''
// Add initial streaming message
addMessage({
id: responseMessageId,
content: '',
workflowId: activeWorkflowId,
type: 'workflow',
isStreaming: true,
})
const reader = result.stream.getReader()
const decoder = new TextDecoder()
@@ -314,8 +325,8 @@ export function Chat({ chatMessage, setChatMessage }: ChatProps) {
while (true) {
const { done, value } = await reader.read()
if (done) {
// Finalize all streaming messages
messageIdMap.forEach((id) => finalizeMessageStream(id))
// Finalize the streaming message
finalizeMessageStream(responseMessageId)
break
}
@@ -324,92 +335,38 @@ export function Chat({ chatMessage, setChatMessage }: ChatProps) {
for (const line of lines) {
if (line.startsWith('data: ')) {
try {
const json = JSON.parse(line.substring(6))
const { blockId, chunk: contentChunk, event, data } = json
const data = line.substring(6)
if (event === 'final' && data) {
const result = data as ExecutionResult
if (data === '[DONE]') {
continue
}
try {
const json = JSON.parse(data)
const { blockId, chunk: contentChunk, event, data: eventData } = json
if (event === 'final' && eventData) {
const result = eventData as ExecutionResult
// If final result is a failure, surface error and stop
if ('success' in result && !result.success) {
addMessage({
content: `Error: ${result.error || 'Workflow execution failed'}`,
workflowId: activeWorkflowId,
type: 'workflow',
})
// Clear any existing message streams
for (const msgId of messageIdMap.values()) {
finalizeMessageStream(msgId)
}
messageIdMap.clear()
// Update the existing message with error
appendMessageContent(
responseMessageId,
`${accumulatedContent ? '\n\n' : ''}Error: ${result.error || 'Workflow execution failed'}`
)
finalizeMessageStream(responseMessageId)
// Stop processing
return
}
const nonStreamingLogs =
result.logs?.filter((log) => !messageIdMap.has(log.blockId)) || []
if (nonStreamingLogs.length > 0) {
const outputsToRender = selectedOutputs.filter((outputId) => {
const blockIdForOutput = extractBlockIdFromOutputId(outputId)
return nonStreamingLogs.some((log) => log.blockId === blockIdForOutput)
})
for (const outputId of outputsToRender) {
const blockIdForOutput = extractBlockIdFromOutputId(outputId)
const path = extractPathFromOutputId(outputId, blockIdForOutput)
const log = nonStreamingLogs.find((l) => l.blockId === blockIdForOutput)
if (log) {
let output = log.output
if (path) {
output = parseOutputContentSafely(output)
const pathParts = path.split('.')
let current = output
for (const part of pathParts) {
if (current && typeof current === 'object' && part in current) {
current = current[part]
} else {
current = undefined
break
}
}
output = current
}
if (output !== undefined) {
addMessage({
content: typeof output === 'string' ? output : JSON.stringify(output),
workflowId: activeWorkflowId,
type: 'workflow',
})
}
}
}
}
// Final event just marks completion, content already streamed
finalizeMessageStream(responseMessageId)
} else if (blockId && contentChunk) {
if (!messageIdMap.has(blockId)) {
const newMessageId = crypto.randomUUID()
messageIdMap.set(blockId, newMessageId)
addMessage({
id: newMessageId,
content: contentChunk,
workflowId: activeWorkflowId,
type: 'workflow',
isStreaming: true,
})
} else {
const existingMessageId = messageIdMap.get(blockId)
if (existingMessageId) {
appendMessageContent(existingMessageId, contentChunk)
}
}
} else if (blockId && event === 'end') {
const existingMessageId = messageIdMap.get(blockId)
if (existingMessageId) {
finalizeMessageStream(existingMessageId)
}
// Accumulate all content into the single message
accumulatedContent += contentChunk
appendMessageContent(responseMessageId, contentChunk)
}
} catch (e) {
logger.error('Error parsing stream data:', e)


@@ -1,5 +1,6 @@
import { useEffect, useMemo, useRef, useState } from 'react'
import { Check, ChevronDown } from 'lucide-react'
import { createPortal } from 'react-dom'
import { extractFieldsFromSchema, parseResponseFormatSafely } from '@/lib/response-format'
import { cn } from '@/lib/utils'
import { getBlock } from '@/blocks'
@@ -13,6 +14,7 @@ interface OutputSelectProps {
onOutputSelect: (outputIds: string[]) => void
disabled?: boolean
placeholder?: string
valueMode?: 'id' | 'label'
}
export function OutputSelect({
@@ -21,11 +23,46 @@ export function OutputSelect({
onOutputSelect,
disabled = false,
placeholder = 'Select output sources',
valueMode = 'id',
}: OutputSelectProps) {
const [isOutputDropdownOpen, setIsOutputDropdownOpen] = useState(false)
const dropdownRef = useRef<HTMLDivElement>(null)
const portalRef = useRef<HTMLDivElement>(null)
const [portalStyle, setPortalStyle] = useState<{
top: number
left: number
width: number
height: number
} | null>(null)
const blocks = useWorkflowStore((state) => state.blocks)
const { isShowingDiff, isDiffReady, diffWorkflow } = useWorkflowDiffStore()
// Find all scrollable ancestors so the dropdown can stay pinned on scroll
const getScrollableAncestors = (el: HTMLElement | null): (HTMLElement | Window)[] => {
const ancestors: (HTMLElement | Window)[] = []
let node: HTMLElement | null = el?.parentElement || null
const isScrollable = (elem: HTMLElement) => {
const style = window.getComputedStyle(elem)
const overflowY = style.overflowY
const overflow = style.overflow
const hasScroll = elem.scrollHeight > elem.clientHeight
return (
hasScroll &&
(overflowY === 'auto' ||
overflowY === 'scroll' ||
overflow === 'auto' ||
overflow === 'scroll')
)
}
while (node && node !== document.body) {
if (isScrollable(node)) ancestors.push(node)
node = node.parentElement
}
// Always include window as a fallback
ancestors.push(window)
return ancestors
}
// Track subblock store state to ensure proper reactivity
const subBlockValues = useSubBlockStore((state) =>
@@ -166,28 +203,31 @@ export function OutputSelect({
return outputs
}, [workflowBlocks, workflowId, isShowingDiff, isDiffReady, diffWorkflow, blocks, subBlockValues])
// Utility to check selected by id or label
const isSelectedValue = (o: { id: string; label: string }) =>
selectedOutputs.includes(o.id) || selectedOutputs.includes(o.label)
// Get selected outputs display text
const selectedOutputsDisplayText = useMemo(() => {
if (!selectedOutputs || selectedOutputs.length === 0) {
return placeholder
}
// Ensure all selected outputs exist in the workflowOutputs array
const validOutputs = selectedOutputs.filter((id) => workflowOutputs.some((o) => o.id === id))
// Ensure all selected outputs exist in the workflowOutputs array by id or label
const validOutputs = selectedOutputs.filter((val) =>
workflowOutputs.some((o) => o.id === val || o.label === val)
)
if (validOutputs.length === 0) {
return placeholder
}
if (validOutputs.length === 1) {
const output = workflowOutputs.find((o) => o.id === validOutputs[0])
const output = workflowOutputs.find(
(o) => o.id === validOutputs[0] || o.label === validOutputs[0]
)
if (output) {
// Add defensive check for output.blockName
const blockNameText =
output.blockName && typeof output.blockName === 'string'
? output.blockName.replace(/\s+/g, '').toLowerCase()
: `block-${output.blockId}`
return `${blockNameText}.${output.path}`
return output.label
}
return placeholder
}
@@ -199,10 +239,14 @@ export function OutputSelect({
const selectedOutputInfo = useMemo(() => {
if (!selectedOutputs || selectedOutputs.length === 0) return null
const validOutputs = selectedOutputs.filter((id) => workflowOutputs.some((o) => o.id === id))
const validOutputs = selectedOutputs.filter((val) =>
workflowOutputs.some((o) => o.id === val || o.label === val)
)
if (validOutputs.length === 0) return null
const output = workflowOutputs.find((o) => o.id === validOutputs[0])
const output = workflowOutputs.find(
(o) => o.id === validOutputs[0] || o.label === validOutputs[0]
)
if (!output) return null
return {
@@ -295,7 +339,10 @@ export function OutputSelect({
// Close dropdown when clicking outside
useEffect(() => {
const handleClickOutside = (event: MouseEvent) => {
if (dropdownRef.current && !dropdownRef.current.contains(event.target as Node)) {
const target = event.target as Node
const insideTrigger = dropdownRef.current?.contains(target)
const insidePortal = portalRef.current?.contains(target)
if (!insideTrigger && !insidePortal) {
setIsOutputDropdownOpen(false)
}
}
@@ -306,15 +353,52 @@ export function OutputSelect({
}
}, [])
// Position the portal dropdown relative to the trigger button
useEffect(() => {
const updatePosition = () => {
if (!isOutputDropdownOpen || !dropdownRef.current) return
const rect = dropdownRef.current.getBoundingClientRect()
const available = Math.max(140, window.innerHeight - rect.bottom - 12)
const height = Math.min(available, 240)
setPortalStyle({ top: rect.bottom + 4, left: rect.left, width: rect.width, height })
}
let attachedScrollTargets: (HTMLElement | Window)[] = []
let rafId: number | null = null
if (isOutputDropdownOpen) {
updatePosition()
window.addEventListener('resize', updatePosition)
attachedScrollTargets = getScrollableAncestors(dropdownRef.current)
attachedScrollTargets.forEach((target) =>
target.addEventListener('scroll', updatePosition, { passive: true })
)
const loop = () => {
updatePosition()
rafId = requestAnimationFrame(loop)
}
rafId = requestAnimationFrame(loop)
}
return () => {
window.removeEventListener('resize', updatePosition)
attachedScrollTargets.forEach((target) =>
target.removeEventListener('scroll', updatePosition)
)
if (rafId) cancelAnimationFrame(rafId)
}
}, [isOutputDropdownOpen])
// Handle output selection - toggle selection
const handleOutputSelection = (value: string) => {
const emittedValue =
valueMode === 'label' ? value : workflowOutputs.find((o) => o.label === value)?.id || value
let newSelectedOutputs: string[]
const index = selectedOutputs.indexOf(value)
const index = selectedOutputs.indexOf(emittedValue)
if (index === -1) {
newSelectedOutputs = [...new Set([...selectedOutputs, value])]
newSelectedOutputs = [...new Set([...selectedOutputs, emittedValue])]
} else {
newSelectedOutputs = selectedOutputs.filter((id) => id !== value)
newSelectedOutputs = selectedOutputs.filter((id) => id !== emittedValue)
}
onOutputSelect(newSelectedOutputs)
@@ -359,48 +443,73 @@ export function OutputSelect({
/>
</button>
{isOutputDropdownOpen && workflowOutputs.length > 0 && (
<div className='absolute left-0 z-50 mt-1 w-full overflow-hidden rounded-[8px] border border-[#E5E5E5] bg-[#FFFFFF] pt-1 shadow-xs dark:border-[#414141] dark:bg-[var(--surface-elevated)]'>
<div className='max-h-[230px] overflow-y-auto'>
{Object.entries(groupedOutputs).map(([blockName, outputs]) => (
<div key={blockName}>
<div className='border-[#E5E5E5] border-t px-3 pt-1.5 pb-0.5 font-normal text-muted-foreground text-xs first:border-t-0 dark:border-[#414141]'>
{blockName}
</div>
<div>
{outputs.map((output) => (
<button
type='button'
key={output.id}
onClick={() => handleOutputSelection(output.id)}
className={cn(
'flex w-full items-center gap-2 px-3 py-1.5 text-left font-normal text-sm',
'hover:bg-accent hover:text-accent-foreground',
'focus:bg-accent focus:text-accent-foreground focus:outline-none'
)}
>
<div
className='flex h-5 w-5 flex-shrink-0 items-center justify-center rounded'
style={{
backgroundColor: getOutputColor(output.blockId, output.blockType),
}}
>
<span className='h-3 w-3 font-bold text-white text-xs'>
{blockName.charAt(0).toUpperCase()}
</span>
</div>
<span className='flex-1 truncate'>{output.path}</span>
{selectedOutputs.includes(output.id) && (
<Check className='h-4 w-4 flex-shrink-0 text-muted-foreground' />
)}
</button>
))}
</div>
{isOutputDropdownOpen &&
workflowOutputs.length > 0 &&
portalStyle &&
createPortal(
<div
ref={portalRef}
style={{
position: 'fixed',
top: portalStyle.top - 1, // overlap border by 1px to avoid visible gap
left: portalStyle.left,
width: portalStyle.width,
zIndex: 2147483647,
pointerEvents: 'auto',
}}
className='mt-0'
data-rs-scroll-lock-ignore
>
<div className='overflow-hidden rounded-[8px] border border-[#E5E5E5] bg-[#FFFFFF] pt-1 shadow-xs dark:border-[#414141] dark:bg-[var(--surface-elevated)]'>
<div
className='overflow-y-auto overscroll-contain'
style={{ maxHeight: portalStyle.height }}
onWheel={(e) => {
// Keep wheel scroll inside the dropdown and avoid dialog/body scroll locks
e.stopPropagation()
}}
>
{Object.entries(groupedOutputs).map(([blockName, outputs]) => (
<div key={blockName}>
<div className='border-[#E5E5E5] border-t px-3 pt-1.5 pb-0.5 font-normal text-muted-foreground text-xs first:border-t-0 dark:border-[#414141]'>
{blockName}
</div>
<div>
{outputs.map((output) => (
<button
type='button'
key={output.id}
onClick={() => handleOutputSelection(output.label)}
className={cn(
'flex w-full items-center gap-2 px-3 py-1.5 text-left font-normal text-sm',
'hover:bg-accent hover:text-accent-foreground',
'focus:bg-accent focus:text-accent-foreground focus:outline-none'
)}
>
<div
className='flex h-5 w-5 flex-shrink-0 items-center justify-center rounded'
style={{
backgroundColor: getOutputColor(output.blockId, output.blockType),
}}
>
<span className='h-3 w-3 font-bold text-white text-xs'>
{blockName.charAt(0).toUpperCase()}
</span>
</div>
<span className='flex-1 truncate'>{output.path}</span>
{isSelectedValue(output) && (
<Check className='h-4 w-4 flex-shrink-0 text-muted-foreground' />
)}
</button>
))}
</div>
</div>
))}
</div>
))}
</div>
</div>
)}
</div>
</div>,
document.body
)}
</div>
)
}
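
For reference, a hypothetical parent wiring up the new valueMode prop. The workflowId and selectedOutputs prop names are assumed from how they are used inside the component (their declarations fall outside this hunk), and the OutputSelect import is omitted. With valueMode='label' the component emits human-readable labels such as "agent1.content" instead of internal output ids, and matches existing selections against either form.

// Hypothetical usage sketch; OutputSelect import path assumed.
import { useState } from 'react'

function ChatOutputPicker({ workflowId }: { workflowId: string }) {
  const [selected, setSelected] = useState<string[]>([])
  return (
    <OutputSelect
      workflowId={workflowId}
      selectedOutputs={selected}
      onOutputSelect={setSelected}
      valueMode='label'
      placeholder='Select output sources'
    />
  )
}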

View File

@@ -13,13 +13,16 @@ import { checkTagTrigger, TagDropdown } from '@/components/ui/tag-dropdown'
import { CodeLanguage } from '@/lib/execution/languages'
import { createLogger } from '@/lib/logs/console/logger'
import { cn } from '@/lib/utils'
import { isLikelyReferenceSegment, SYSTEM_REFERENCE_PREFIXES } from '@/lib/workflows/references'
import { WandPromptBar } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/wand-prompt-bar/wand-prompt-bar'
import { useSubBlockValue } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/workflow-block/components/sub-block/hooks/use-sub-block-value'
import { useAccessibleReferencePrefixes } from '@/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-accessible-reference-prefixes'
import { useWand } from '@/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-wand'
import type { GenerationType } from '@/blocks/types'
import { useCollaborativeWorkflow } from '@/hooks/use-collaborative-workflow'
import { useTagSelection } from '@/hooks/use-tag-selection'
import { useSubBlockStore } from '@/stores/workflows/subblock/store'
import { normalizeBlockName } from '@/stores/workflows/utils'
const logger = createLogger('Code')
@@ -99,6 +102,8 @@ export function Code({
const [activeSourceBlockId, setActiveSourceBlockId] = useState<string | null>(null)
const [visualLineHeights, setVisualLineHeights] = useState<number[]>([])
const accessiblePrefixes = useAccessibleReferencePrefixes(blockId)
const collapsedStateKey = `${subBlockId}_collapsed`
const isCollapsed =
(useSubBlockStore((state) => state.getValue(blockId, collapsedStateKey)) as boolean) ?? false
@@ -354,6 +359,30 @@ IMPORTANT FORMATTING RULES:
}, 0)
}
const shouldHighlightReference = (part: string): boolean => {
if (!part.startsWith('<') || !part.endsWith('>')) {
return false
}
if (!isLikelyReferenceSegment(part)) {
return false
}
if (!accessiblePrefixes) {
return true
}
const inner = part.slice(1, -1)
const [prefix] = inner.split('.')
const normalizedPrefix = normalizeBlockName(prefix)
if (SYSTEM_REFERENCE_PREFIXES.has(normalizedPrefix)) {
return true
}
return accessiblePrefixes.has(normalizedPrefix)
}
const renderLineNumbers = (): ReactElement[] => {
const numbers: ReactElement[] = []
let lineNumber = 1
@@ -490,13 +519,51 @@ IMPORTANT FORMATTING RULES:
e.preventDefault()
}
}}
highlight={(codeToHighlight) =>
highlight(
codeToHighlight,
languages[effectiveLanguage === 'python' ? 'python' : 'javascript'],
effectiveLanguage === 'python' ? 'python' : 'javascript'
)
}
highlight={(codeToHighlight) => {
const placeholders: { placeholder: string; original: string; type: 'var' | 'env' }[] =
[]
let processedCode = codeToHighlight
// Replace environment variables with placeholders
processedCode = processedCode.replace(/\{\{([^}]+)\}\}/g, (match) => {
const placeholder = `__ENV_VAR_${placeholders.length}__`
placeholders.push({ placeholder, original: match, type: 'env' })
return placeholder
})
// Replace variable references with placeholders
processedCode = processedCode.replace(/<([^>]+)>/g, (match) => {
if (shouldHighlightReference(match)) {
const placeholder = `__VAR_REF_${placeholders.length}__`
placeholders.push({ placeholder, original: match, type: 'var' })
return placeholder
}
return match
})
// Apply Prism syntax highlighting
const lang = effectiveLanguage === 'python' ? 'python' : 'javascript'
let highlightedCode = highlight(processedCode, languages[lang], lang)
// Restore and highlight the placeholders
placeholders.forEach(({ placeholder, original, type }) => {
if (type === 'env') {
highlightedCode = highlightedCode.replace(
placeholder,
`<span class="text-blue-500">${original}</span>`
)
} else if (type === 'var') {
// Escape the < and > for display
const escaped = original.replace(/</g, '&lt;').replace(/>/g, '&gt;')
highlightedCode = highlightedCode.replace(
placeholder,
`<span class="text-blue-500">${escaped}</span>`
)
}
})
return highlightedCode
}}
padding={12}
style={{
fontFamily: 'inherit',
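
The hunk above interleaves the previous single-call highlighter with the new placeholder-based version. As a standalone sketch of the same technique, written against the public prismjs API rather than the editor's bundled grammars, the flow is roughly:

// A reduced sketch of the placeholder approach; not the component's actual code.
import Prism from 'prismjs'

export function highlightWithReferences(code: string): string {
  const placeholders: { token: string; original: string }[] = []
  // Protect {{ENV_VARS}} and <block.references> from the syntax highlighter.
  const protectedCode = code
    .replace(/\{\{[^}]+\}\}/g, (match) => {
      const token = `__PLACEHOLDER_${placeholders.length}__`
      placeholders.push({ token, original: match })
      return token
    })
    .replace(/<[^>]+>/g, (match) => {
      const token = `__PLACEHOLDER_${placeholders.length}__`
      // Escape angle brackets so the reference renders as text, not HTML.
      placeholders.push({ token, original: match.replace(/</g, '&lt;').replace(/>/g, '&gt;') })
      return token
    })
  let html = Prism.highlight(protectedCode, Prism.languages.javascript, 'javascript')
  // Restore the protected segments, wrapped in the highlight class used above.
  for (const { token, original } of placeholders) {
    html = html.replace(token, `<span class="text-blue-500">${original}</span>`)
  }
  return html
}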

View File

@@ -203,8 +203,14 @@ export function useWand({
for (const line of lines) {
if (line.startsWith('data: ')) {
const lineData = line.substring(6)
if (lineData === '[DONE]') {
continue
}
try {
const data = JSON.parse(line.substring(6))
const data = JSON.parse(lineData)
if (data.error) {
throw new Error(data.error)
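
The guard above skips the stream terminator before parsing. As a compact restatement of the rule (a sketch, not the hook's actual helper):

// Skip the [DONE] sentinel, JSON-parse every other data: line, surface embedded errors.
function parseSSELine(line: string): Record<string, unknown> | null {
  if (!line.startsWith('data: ')) return null
  const payload = line.substring(6)
  if (payload === '[DONE]') return null // end-of-stream marker, not JSON
  const data = JSON.parse(payload)
  if (data?.error) throw new Error(data.error)
  return data
}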

View File

@@ -30,9 +30,10 @@ interface ExecutorOptions {
workflowVariables?: Record<string, any>
contextExtensions?: {
stream?: boolean
selectedOutputIds?: string[]
selectedOutputs?: string[]
edges?: Array<{ source: string; target: string }>
onStream?: (streamingExecution: StreamingExecution) => Promise<void>
onBlockComplete?: (blockId: string, output: any) => Promise<void>
executionId?: string
workspaceId?: string
}
@@ -44,6 +45,56 @@ interface DebugValidationResult {
error?: string
}
const WORKFLOW_EXECUTION_FAILURE_MESSAGE = 'Workflow execution failed'
function isRecord(value: unknown): value is Record<string, unknown> {
return typeof value === 'object' && value !== null
}
function sanitizeMessage(value: unknown): string | undefined {
if (typeof value !== 'string') return undefined
const trimmed = value.trim()
if (!trimmed || trimmed === 'undefined (undefined)') return undefined
return trimmed
}
function normalizeErrorMessage(error: unknown): string {
if (error instanceof Error) {
const message = sanitizeMessage(error.message)
if (message) return message
} else if (typeof error === 'string') {
const message = sanitizeMessage(error)
if (message) return message
}
if (isRecord(error)) {
const directMessage = sanitizeMessage(error.message)
if (directMessage) return directMessage
const nestedError = error.error
if (isRecord(nestedError)) {
const nestedMessage = sanitizeMessage(nestedError.message)
if (nestedMessage) return nestedMessage
} else {
const nestedMessage = sanitizeMessage(nestedError)
if (nestedMessage) return nestedMessage
}
}
return WORKFLOW_EXECUTION_FAILURE_MESSAGE
}
function isExecutionResult(value: unknown): value is ExecutionResult {
if (!isRecord(value)) return false
return typeof value.success === 'boolean' && isRecord(value.output)
}
function extractExecutionResult(error: unknown): ExecutionResult | null {
if (!isRecord(error)) return null
const candidate = error.executionResult
return isExecutionResult(candidate) ? candidate : null
}
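
A few illustrative cases for the helpers just above (not part of the diff; inputs are hypothetical):

import assert from 'node:assert'

assert.equal(normalizeErrorMessage(new Error('Rate limit exceeded')), 'Rate limit exceeded')
assert.equal(normalizeErrorMessage({ error: { message: 'Bad request' } }), 'Bad request')
// Placeholder messages like 'undefined (undefined)' fall back to the generic failure text.
assert.equal(normalizeErrorMessage('undefined (undefined)'), WORKFLOW_EXECUTION_FAILURE_MESSAGE)
// extractExecutionResult only returns objects that look like an ExecutionResult.
assert.deepEqual(extractExecutionResult({ executionResult: { success: false, output: {} } }), {
  success: false,
  output: {},
})
assert.equal(extractExecutionResult(new Error('no result attached')), null)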
export function useWorkflowExecution() {
const currentWorkflow = useCurrentWorkflow()
const { activeWorkflowId, workflows } = useWorkflowRegistry()
@@ -273,7 +324,7 @@ export function useWorkflowExecution() {
if (isChatExecution) {
const stream = new ReadableStream({
async start(controller) {
const encoder = new TextEncoder()
const { encodeSSE } = await import('@/lib/utils')
const executionId = uuidv4()
const streamedContent = new Map<string, string>()
const streamReadingPromises: Promise<void>[] = []
@@ -360,6 +411,8 @@ export function useWorkflowExecution() {
if (!streamingExecution.stream) return
const reader = streamingExecution.stream.getReader()
const blockId = (streamingExecution.execution as any)?.blockId
let isFirstChunk = true
if (blockId) {
streamedContent.set(blockId, '')
}
@@ -373,14 +426,17 @@ export function useWorkflowExecution() {
if (blockId) {
streamedContent.set(blockId, (streamedContent.get(blockId) || '') + chunk)
}
controller.enqueue(
encoder.encode(
`data: ${JSON.stringify({
blockId,
chunk,
})}\n\n`
)
)
// Add separator before first chunk if this isn't the first block
let chunkToSend = chunk
if (isFirstChunk && streamedContent.size > 1) {
chunkToSend = `\n\n${chunk}`
isFirstChunk = false
} else if (isFirstChunk) {
isFirstChunk = false
}
controller.enqueue(encodeSSE({ blockId, chunk: chunkToSend }))
}
} catch (error) {
logger.error('Error reading from stream:', error)
@@ -390,8 +446,58 @@ export function useWorkflowExecution() {
streamReadingPromises.push(promise)
}
// Handle non-streaming blocks (like Function blocks)
const onBlockComplete = async (blockId: string, output: any) => {
// Get selected outputs from chat store
const chatStore = await import('@/stores/panel/chat/store').then(
(mod) => mod.useChatStore
)
const selectedOutputs = chatStore
.getState()
.getSelectedWorkflowOutput(activeWorkflowId)
if (!selectedOutputs?.length) return
const { extractBlockIdFromOutputId, extractPathFromOutputId, traverseObjectPath } =
await import('@/lib/response-format')
// Check if this block's output is selected
const matchingOutputs = selectedOutputs.filter(
(outputId) => extractBlockIdFromOutputId(outputId) === blockId
)
if (!matchingOutputs.length) return
// Process each selected output from this block
for (const outputId of matchingOutputs) {
const path = extractPathFromOutputId(outputId, blockId)
const outputValue = traverseObjectPath(output, path)
if (outputValue !== undefined) {
const formattedOutput =
typeof outputValue === 'string'
? outputValue
: JSON.stringify(outputValue, null, 2)
// Add separator if this isn't the first output
const separator = streamedContent.size > 0 ? '\n\n' : ''
// Send the non-streaming block output as a chunk
controller.enqueue(encodeSSE({ blockId, chunk: separator + formattedOutput }))
// Track that we've sent output for this block
streamedContent.set(blockId, formattedOutput)
}
}
}
try {
const result = await executeWorkflow(workflowInput, onStream, executionId)
const result = await executeWorkflow(
workflowInput,
onStream,
executionId,
onBlockComplete
)
// Check if execution was cancelled
if (
@@ -400,11 +506,7 @@ export function useWorkflowExecution() {
!result.success &&
result.error === 'Workflow execution was cancelled'
) {
controller.enqueue(
encoder.encode(
`data: ${JSON.stringify({ event: 'cancelled', data: result })}\n\n`
)
)
controller.enqueue(encodeSSE({ event: 'cancelled', data: result }))
return
}
@@ -439,9 +541,8 @@ export function useWorkflowExecution() {
logger.info(`Processed ${processedCount} blocks for streaming tokenization`)
}
controller.enqueue(
encoder.encode(`data: ${JSON.stringify({ event: 'final', data: result })}\n\n`)
)
const { encodeSSE } = await import('@/lib/utils')
controller.enqueue(encodeSSE({ event: 'final', data: result }))
persistLogs(executionId, result).catch((err) =>
logger.error('Error persisting logs:', err)
)
@@ -461,9 +562,8 @@ export function useWorkflowExecution() {
}
// Send the error as final event so downstream handlers can treat it uniformly
controller.enqueue(
encoder.encode(`data: ${JSON.stringify({ event: 'final', data: errorResult })}\n\n`)
)
const { encodeSSE } = await import('@/lib/utils')
controller.enqueue(encodeSSE({ event: 'final', data: errorResult }))
// Persist the error to logs so it shows up in the logs page
persistLogs(executionId, errorResult).catch((err) =>
@@ -539,7 +639,8 @@ export function useWorkflowExecution() {
const executeWorkflow = async (
workflowInput?: any,
onStream?: (se: StreamingExecution) => Promise<void>,
executionId?: string
executionId?: string,
onBlockComplete?: (blockId: string, output: any) => Promise<void>
): Promise<ExecutionResult | StreamingExecution> => {
// Use currentWorkflow but check if we're in diff mode
const { blocks: workflowBlocks, edges: workflowEdges } = currentWorkflow
@@ -664,11 +765,11 @@ export function useWorkflowExecution() {
)
// If this is a chat execution, get the selected outputs
let selectedOutputIds: string[] | undefined
let selectedOutputs: string[] | undefined
if (isExecutingFromChat && activeWorkflowId) {
// Get selected outputs from chat store
const chatStore = await import('@/stores/panel/chat/store').then((mod) => mod.useChatStore)
selectedOutputIds = chatStore.getState().getSelectedWorkflowOutput(activeWorkflowId)
selectedOutputs = chatStore.getState().getSelectedWorkflowOutput(activeWorkflowId)
}
// Helper to extract test values from inputFormat subblock
@@ -843,12 +944,13 @@ export function useWorkflowExecution() {
workflowVariables,
contextExtensions: {
stream: isExecutingFromChat,
selectedOutputIds,
selectedOutputs,
edges: workflow.connections.map((conn) => ({
source: conn.source,
target: conn.target,
})),
onStream,
onBlockComplete,
executionId,
workspaceId,
},
@@ -862,74 +964,56 @@ export function useWorkflowExecution() {
return newExecutor.execute(activeWorkflowId || '', startBlockId)
}
const handleExecutionError = (error: any, options?: { executionId?: string }) => {
let errorMessage = 'Unknown error'
if (error instanceof Error) {
errorMessage = error.message || `Error: ${String(error)}`
} else if (typeof error === 'string') {
errorMessage = error
} else if (error && typeof error === 'object') {
if (
error.message === 'undefined (undefined)' ||
(error.error &&
typeof error.error === 'object' &&
error.error.message === 'undefined (undefined)')
) {
errorMessage = 'API request failed - no specific error details available'
} else if (error.message) {
errorMessage = error.message
} else if (error.error && typeof error.error === 'string') {
errorMessage = error.error
} else if (error.error && typeof error.error === 'object' && error.error.message) {
errorMessage = error.error.message
} else {
try {
errorMessage = `Error details: ${JSON.stringify(error)}`
} catch {
errorMessage = 'Error occurred but details could not be displayed'
}
const handleExecutionError = (error: unknown, options?: { executionId?: string }) => {
const normalizedMessage = normalizeErrorMessage(error)
const executionResultFromError = extractExecutionResult(error)
let errorResult: ExecutionResult
if (executionResultFromError) {
const logs = Array.isArray(executionResultFromError.logs) ? executionResultFromError.logs : []
errorResult = {
...executionResultFromError,
success: false,
error: executionResultFromError.error ?? normalizedMessage,
logs,
}
}
} else {
if (!executor) {
try {
let blockId = 'serialization'
let blockName = 'Workflow'
let blockType = 'serializer'
if (error instanceof WorkflowValidationError) {
blockId = error.blockId || blockId
blockName = error.blockName || blockName
blockType = error.blockType || blockType
}
if (errorMessage === 'undefined (undefined)') {
errorMessage = 'API request failed - no specific error details available'
}
useConsoleStore.getState().addConsole({
input: {},
output: {},
success: false,
error: normalizedMessage,
durationMs: 0,
startedAt: new Date().toISOString(),
endedAt: new Date().toISOString(),
workflowId: activeWorkflowId || '',
blockId,
executionId: options?.executionId,
blockName,
blockType,
})
} catch {}
}
// If we failed before creating an executor (e.g., serializer validation), add a console entry
if (!executor) {
try {
// Prefer attributing to specific subflow if we have a structured error
let blockId = 'serialization'
let blockName = 'Workflow'
let blockType = 'serializer'
if (error instanceof WorkflowValidationError) {
blockId = error.blockId || blockId
blockName = error.blockName || blockName
blockType = error.blockType || blockType
}
useConsoleStore.getState().addConsole({
input: {},
output: {},
success: false,
error: errorMessage,
durationMs: 0,
startedAt: new Date().toISOString(),
endedAt: new Date().toISOString(),
workflowId: activeWorkflowId || '',
blockId,
executionId: options?.executionId,
blockName,
blockType,
})
} catch {}
}
const errorResult: ExecutionResult = {
success: false,
output: {},
error: errorMessage,
logs: [],
errorResult = {
success: false,
output: {},
error: normalizedMessage,
logs: [],
}
}
setExecutionResult(errorResult)
@@ -937,16 +1021,14 @@ export function useWorkflowExecution() {
setIsDebugging(false)
setActiveBlocks(new Set())
let notificationMessage = 'Workflow execution failed'
if (error?.request?.url) {
if (error.request.url && error.request.url.trim() !== '') {
notificationMessage += `: Request to ${error.request.url} failed`
if (error.status) {
notificationMessage += ` (Status: ${error.status})`
}
let notificationMessage = WORKFLOW_EXECUTION_FAILURE_MESSAGE
if (isRecord(error) && isRecord(error.request) && sanitizeMessage(error.request.url)) {
notificationMessage += `: Request to ${(error.request.url as string).trim()} failed`
if ('status' in error && typeof error.status === 'number') {
notificationMessage += ` (Status: ${error.status})`
}
} else {
notificationMessage += `: ${errorMessage}`
} else if (sanitizeMessage(errorResult.error)) {
notificationMessage += `: ${errorResult.error}`
}
return errorResult
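
The hunks above replace the inline TextEncoder and data-prefix boilerplate with an encodeSSE helper imported from @/lib/utils. Its implementation is not part of this diff; judging by how it is called, it is presumably equivalent to something like the following (an assumption, not the actual source):

// Assumed shape only, inferred from controller.enqueue(encodeSSE({ blockId, chunk })):
// serialize a payload into a single `data: ...\n\n` SSE frame as bytes.
const sseEncoder = new TextEncoder()

export function encodeSSE(payload: unknown): Uint8Array {
  return sseEncoder.encode(`data: ${JSON.stringify(payload)}\n\n`)
}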

View File

@@ -30,7 +30,7 @@ interface ExecutorOptions {
workflowVariables?: Record<string, any>
contextExtensions?: {
stream?: boolean
selectedOutputIds?: string[]
selectedOutputs?: string[]
edges?: Array<{ source: string; target: string }>
onStream?: (streamingExecution: StreamingExecution) => Promise<void>
executionId?: string
@@ -181,11 +181,11 @@ export async function executeWorkflowWithLogging(
)
// If this is a chat execution, get the selected outputs
let selectedOutputIds: string[] | undefined
let selectedOutputs: string[] | undefined
if (isExecutingFromChat) {
// Get selected outputs from chat store
const chatStore = await import('@/stores/panel/chat/store').then((mod) => mod.useChatStore)
selectedOutputIds = chatStore.getState().getSelectedWorkflowOutput(activeWorkflowId)
selectedOutputs = chatStore.getState().getSelectedWorkflowOutput(activeWorkflowId)
}
// Create executor options
@@ -197,7 +197,7 @@ export async function executeWorkflowWithLogging(
workflowVariables,
contextExtensions: {
stream: isExecutingFromChat,
selectedOutputIds,
selectedOutputs,
edges: workflow.connections.map((conn) => ({
source: conn.source,
target: conn.target,

View File

@@ -1422,7 +1422,7 @@ const WorkflowContent = React.memo(() => {
setDraggedNodeId(node.id)
// Emit collaborative position update during drag for smooth real-time movement
collaborativeUpdateBlockPosition(node.id, node.position)
collaborativeUpdateBlockPosition(node.id, node.position, false)
// Get the current parent ID of the node being dragged
const currentParentId = blocks[node.id]?.data?.parentId || null
@@ -1608,7 +1608,7 @@ const WorkflowContent = React.memo(() => {
// Emit collaborative position update for the final position
// This ensures other users see the smooth final position
collaborativeUpdateBlockPosition(node.id, node.position)
collaborativeUpdateBlockPosition(node.id, node.position, true)
// Record single move entry on drag end to avoid micro-moves
try {

View File

@@ -510,7 +510,7 @@ export function ApiKeys({ onOpenChange, registerCloseHandler }: ApiKeysProps) {
setKeyType('personal')
if (createError) setCreateError(null)
}}
className='h-8'
className='h-8 data-[variant=outline]:border-border data-[variant=outline]:bg-background data-[variant=outline]:text-foreground data-[variant=outline]:hover:bg-muted dark:data-[variant=outline]:border-border dark:data-[variant=outline]:bg-background dark:data-[variant=outline]:text-foreground dark:data-[variant=outline]:hover:bg-muted/80'
>
Personal
</Button>
@@ -522,7 +522,7 @@ export function ApiKeys({ onOpenChange, registerCloseHandler }: ApiKeysProps) {
setKeyType('workspace')
if (createError) setCreateError(null)
}}
className='h-8'
className='h-8 data-[variant=outline]:border-border data-[variant=outline]:bg-background data-[variant=outline]:text-foreground data-[variant=outline]:hover:bg-muted dark:data-[variant=outline]:border-border dark:data-[variant=outline]:bg-background dark:data-[variant=outline]:text-foreground dark:data-[variant=outline]:hover:bg-muted/80'
>
Workspace
</Button>
@@ -549,7 +549,7 @@ export function ApiKeys({ onOpenChange, registerCloseHandler }: ApiKeysProps) {
<AlertDialogFooter className='flex'>
<AlertDialogCancel
className='h-9 w-full rounded-[8px]'
className='h-9 w-full rounded-[8px] border-border bg-background text-foreground hover:bg-muted dark:border-border dark:bg-background dark:text-foreground dark:hover:bg-muted/80'
onClick={() => {
setNewKeyName('')
setKeyType('personal')

View File

@@ -32,6 +32,7 @@ interface WorkflowPreviewProps {
isPannable?: boolean
defaultPosition?: { x: number; y: number }
defaultZoom?: number
fitPadding?: number
onNodeClick?: (blockId: string, mousePosition: { x: number; y: number }) => void
}
@@ -54,7 +55,8 @@ export function WorkflowPreview({
width = '100%',
isPannable = false,
defaultPosition,
defaultZoom,
defaultZoom = 0.8,
fitPadding = 0.25,
onNodeClick,
}: WorkflowPreviewProps) {
// Check if the workflow state is valid
@@ -274,6 +276,7 @@ export function WorkflowPreview({
edgeTypes={edgeTypes}
connectionLineType={ConnectionLineType.SmoothStep}
fitView
fitViewOptions={{ padding: fitPadding }}
panOnScroll={false}
panOnDrag={isPannable}
zoomOnScroll={false}
@@ -298,7 +301,12 @@ export function WorkflowPreview({
: undefined
}
>
<Background />
<Background
color='hsl(var(--workflow-dots))'
size={4}
gap={40}
style={{ backgroundColor: 'hsl(var(--workflow-background))' }}
/>
</ReactFlow>
</div>
</ReactFlowProvider>
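
A hypothetical call site for the new fitPadding knob. Only isPannable, defaultZoom, and fitPadding appear in the hunks above; the workflowState prop name and the omitted import are assumptions.

// All names outside the hunks above (workflowState, the import) are hypothetical.
export function TemplatePreviewCard({ workflowState }: { workflowState: unknown }) {
  return (
    <WorkflowPreview
      workflowState={workflowState}
      isPannable={false}
      defaultZoom={0.8}
      fitPadding={0.25}
    />
  )
}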

View File

@@ -17,6 +17,7 @@ import {
} from '@/lib/workflows/db-helpers'
import { updateWorkflowRunCounts } from '@/lib/workflows/utils'
import { Executor } from '@/executor'
import type { ExecutionResult } from '@/executor/types'
import { Serializer } from '@/serializer'
import { mergeSubblockState } from '@/stores/workflows/server-utils'
@@ -386,6 +387,13 @@ async function executeWebhookJobInternal(
// Complete logging session with error (matching workflow-execution pattern)
try {
const executionResult = (error?.executionResult as ExecutionResult | undefined) || {
success: false,
output: {},
logs: [],
}
const { traceSpans } = buildTraceSpans(executionResult)
await loggingSession.safeCompleteWithError({
endedAt: new Date().toISOString(),
totalDurationMs: 0,
@@ -393,6 +401,7 @@ async function executeWebhookJobInternal(
message: error.message || 'Webhook execution failed',
stackTrace: error.stack,
},
traceSpans,
})
} catch (loggingError) {
logger.error(`[${requestId}] Failed to complete logging session`, loggingError)

View File

@@ -192,6 +192,9 @@ export async function executeWorkflowJob(payload: WorkflowExecutionPayload) {
stack: error.stack,
})
const executionResult = error?.executionResult || { success: false, output: {}, logs: [] }
const { traceSpans } = buildTraceSpans(executionResult)
await loggingSession.safeCompleteWithError({
endedAt: new Date().toISOString(),
totalDurationMs: 0,
@@ -199,6 +202,7 @@ export async function executeWorkflowJob(payload: WorkflowExecutionPayload) {
message: error.message || 'Workflow execution failed',
stackTrace: error.stack,
},
traceSpans,
})
throw error

View File

@@ -13,7 +13,7 @@ import {
} from '@react-email/components'
import { format } from 'date-fns'
import { getBrandConfig } from '@/lib/branding/branding'
import { env } from '@/lib/env'
import { getEnv } from '@/lib/env'
import { baseStyles } from './base-styles'
import EmailFooter from './footer'
@@ -24,7 +24,7 @@ interface EnterpriseSubscriptionEmailProps {
createdDate?: Date
}
const baseUrl = env.NEXT_PUBLIC_APP_URL || 'https://sim.ai'
const baseUrl = getEnv('NEXT_PUBLIC_APP_URL') || 'https://sim.ai'
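
The email templates switch from the static env object to a getEnv accessor. Its implementation lives in @/lib/env and is not shown in this diff; for orientation only, the call shape is consistent with a lookup along these lines (an assumption, not the actual source):

// Assumed sketch; the real getEnv in '@/lib/env' may add runtime or client-side handling.
export function getEnv(key: string): string | undefined {
  return process.env[key]
}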
export const EnterpriseSubscriptionEmail = ({
userName = 'Valued User',

View File

@@ -1,7 +1,6 @@
import { Container, Img, Link, Section, Text } from '@react-email/components'
import { getBrandConfig } from '@/lib/branding/branding'
import { env } from '@/lib/env'
import { getAssetUrl } from '@/lib/utils'
import { getEnv } from '@/lib/env'
interface UnsubscribeOptions {
unsubscribeToken?: string
@@ -14,7 +13,7 @@ interface EmailFooterProps {
}
export const EmailFooter = ({
baseUrl = env.NEXT_PUBLIC_APP_URL || 'https://sim.ai',
baseUrl = getEnv('NEXT_PUBLIC_APP_URL') || 'https://sim.ai',
unsubscribe,
}: EmailFooterProps) => {
const brand = getBrandConfig()
@@ -29,13 +28,13 @@ export const EmailFooter = ({
<tr>
<td align='center' style={{ padding: '0 8px' }}>
<Link href='https://x.com/simdotai' rel='noopener noreferrer'>
<Img src={getAssetUrl('static/x-icon.png')} width='24' height='24' alt='X' />
<Img src={`${baseUrl}/static/x-icon.png`} width='24' height='24' alt='X' />
</Link>
</td>
<td align='center' style={{ padding: '0 8px' }}>
<Link href='https://discord.gg/Hr4UWYEcTT' rel='noopener noreferrer'>
<Img
src={getAssetUrl('static/discord-icon.png')}
src={`${baseUrl}/static/discord-icon.png`}
width='24'
height='24'
alt='Discord'
@@ -45,7 +44,7 @@ export const EmailFooter = ({
<td align='center' style={{ padding: '0 8px' }}>
<Link href='https://github.com/simstudioai/sim' rel='noopener noreferrer'>
<Img
src={getAssetUrl('static/github-icon.png')}
src={`${baseUrl}/static/github-icon.png`}
width='24'
height='24'
alt='GitHub'

View File

@@ -12,7 +12,7 @@ import {
} from '@react-email/components'
import { format } from 'date-fns'
import { getBrandConfig } from '@/lib/branding/branding'
import { env } from '@/lib/env'
import { getEnv } from '@/lib/env'
import { baseStyles } from './base-styles'
import EmailFooter from './footer'
@@ -23,7 +23,7 @@ interface HelpConfirmationEmailProps {
submittedDate?: Date
}
const baseUrl = env.NEXT_PUBLIC_APP_URL || 'https://sim.ai'
const baseUrl = getEnv('NEXT_PUBLIC_APP_URL') || 'https://sim.ai'
const getTypeLabel = (type: string) => {
switch (type) {

View File

@@ -13,7 +13,7 @@ import {
} from '@react-email/components'
import { format } from 'date-fns'
import { getBrandConfig } from '@/lib/branding/branding'
import { env } from '@/lib/env'
import { getEnv } from '@/lib/env'
import { createLogger } from '@/lib/logs/console/logger'
import { baseStyles } from './base-styles'
import EmailFooter from './footer'
@@ -26,7 +26,7 @@ interface InvitationEmailProps {
updatedDate?: Date
}
const baseUrl = env.NEXT_PUBLIC_APP_URL || 'https://sim.ai'
const baseUrl = getEnv('NEXT_PUBLIC_APP_URL') || 'https://sim.ai'
const logger = createLogger('InvitationEmail')

View File

@@ -11,7 +11,7 @@ import {
Text,
} from '@react-email/components'
import { getBrandConfig } from '@/lib/branding/branding'
import { env } from '@/lib/env'
import { getEnv } from '@/lib/env'
import { baseStyles } from './base-styles'
import EmailFooter from './footer'
@@ -22,7 +22,7 @@ interface OTPVerificationEmailProps {
chatTitle?: string
}
const baseUrl = env.NEXT_PUBLIC_APP_URL || 'https://sim.ai'
const baseUrl = getEnv('NEXT_PUBLIC_APP_URL') || 'https://sim.ai'
const getSubjectByType = (type: string, brandName: string, chatTitle?: string) => {
switch (type) {

View File

@@ -14,7 +14,7 @@ import {
} from '@react-email/components'
import EmailFooter from '@/components/emails/footer'
import { getBrandConfig } from '@/lib/branding/branding'
import { env } from '@/lib/env'
import { getEnv } from '@/lib/env'
import { baseStyles } from './base-styles'
interface PlanWelcomeEmailProps {
@@ -31,7 +31,7 @@ export function PlanWelcomeEmail({
createdDate = new Date(),
}: PlanWelcomeEmailProps) {
const brand = getBrandConfig()
const baseUrl = env.NEXT_PUBLIC_APP_URL || 'https://sim.ai'
const baseUrl = getEnv('NEXT_PUBLIC_APP_URL') || 'https://sim.ai'
const cta = loginLink || `${baseUrl}/login`
const previewText = `${brand.name}: Your ${planName} plan is active`

View File

@@ -13,7 +13,7 @@ import {
} from '@react-email/components'
import { format } from 'date-fns'
import { getBrandConfig } from '@/lib/branding/branding'
import { env } from '@/lib/env'
import { getEnv } from '@/lib/env'
import { baseStyles } from './base-styles'
import EmailFooter from './footer'
@@ -23,7 +23,7 @@ interface ResetPasswordEmailProps {
updatedDate?: Date
}
const baseUrl = env.NEXT_PUBLIC_APP_URL || 'https://sim.ai'
const baseUrl = getEnv('NEXT_PUBLIC_APP_URL') || 'https://sim.ai'
export const ResetPasswordEmail = ({
username = '',

View File

@@ -14,7 +14,7 @@ import {
} from '@react-email/components'
import EmailFooter from '@/components/emails/footer'
import { getBrandConfig } from '@/lib/branding/branding'
import { env } from '@/lib/env'
import { getEnv } from '@/lib/env'
import { baseStyles } from './base-styles'
interface UsageThresholdEmailProps {
@@ -37,7 +37,7 @@ export function UsageThresholdEmail({
updatedDate = new Date(),
}: UsageThresholdEmailProps) {
const brand = getBrandConfig()
const baseUrl = env.NEXT_PUBLIC_APP_URL || 'https://sim.ai'
const baseUrl = getEnv('NEXT_PUBLIC_APP_URL') || 'https://sim.ai'
const previewText = `${brand.name}: You're at ${percentUsed}% of your ${planName} monthly budget`

View File

@@ -12,7 +12,7 @@ import {
Text,
} from '@react-email/components'
import { getBrandConfig } from '@/lib/branding/branding'
import { env } from '@/lib/env'
import { getEnv } from '@/lib/env'
import { createLogger } from '@/lib/logs/console/logger'
import { baseStyles } from './base-styles'
import EmailFooter from './footer'
@@ -25,7 +25,7 @@ interface WorkspaceInvitationEmailProps {
invitationLink?: string
}
const baseUrl = env.NEXT_PUBLIC_APP_URL || 'https://sim.ai'
const baseUrl = getEnv('NEXT_PUBLIC_APP_URL') || 'https://sim.ai'
export const WorkspaceInvitationEmail = ({
workspaceName = 'Workspace',

View File

@@ -600,22 +600,37 @@ export function SocketProvider({ children, user }: SocketProviderProps) {
// Apply light throttling only to position updates for smooth collaborative experience
const isPositionUpdate = operation === 'update-position' && target === 'block'
const { commit = true } = payload || {}
if (isPositionUpdate && payload.id) {
const blockId = payload.id
// Store the latest position update
if (commit) {
socket.emit('workflow-operation', {
operation,
target,
payload,
timestamp: Date.now(),
operationId,
})
pendingPositionUpdates.current.delete(blockId)
const timeoutId = positionUpdateTimeouts.current.get(blockId)
if (timeoutId) {
clearTimeout(timeoutId)
positionUpdateTimeouts.current.delete(blockId)
}
return
}
pendingPositionUpdates.current.set(blockId, {
operation,
target,
payload,
timestamp: Date.now(),
operationId, // Include operation ID for queue tracking
operationId,
})
// Check if we already have a pending timeout for this block
if (!positionUpdateTimeouts.current.has(blockId)) {
// Schedule emission with optimized throttling (30fps = ~33ms) to reduce DB load
const timeoutId = window.setTimeout(() => {
const latestUpdate = pendingPositionUpdates.current.get(blockId)
if (latestUpdate) {
@@ -623,7 +638,7 @@ export function SocketProvider({ children, user }: SocketProviderProps) {
pendingPositionUpdates.current.delete(blockId)
}
positionUpdateTimeouts.current.delete(blockId)
}, 33) // 30fps - good balance between smoothness and DB performance
}, 33)
positionUpdateTimeouts.current.set(blockId, timeoutId)
}
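
These two paths line up with the workflow canvas changes earlier in this diff: drag moves send commit=false updates that funnel through the 33ms throttle, while drag end sends commit=true, which is emitted immediately and clears any pending update. A condensed sketch of the call sites (handler names follow ReactFlow's drag events; the event parameter type is simplified):

type DraggedNode = { id: string; position: { x: number; y: number } }

// While dragging: throttled, non-committing updates (~30fps via the 33ms timeout above).
const onNodeDrag = (_event: unknown, node: DraggedNode) =>
  collaborativeUpdateBlockPosition(node.id, node.position, false)

// On drag end: a committing update that is sent immediately and flushes any pending one.
const onNodeDragStop = (_event: unknown, node: DraggedNode) =>
  collaborativeUpdateBlockPosition(node.id, node.position, true)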

View File

@@ -12,7 +12,8 @@ export enum BlockType {
API = 'api',
EVALUATOR = 'evaluator',
RESPONSE = 'response',
WORKFLOW = 'workflow',
WORKFLOW = 'workflow', // Deprecated - kept for backwards compatibility
WORKFLOW_INPUT = 'workflow_input', // Current workflow block type
STARTER = 'starter',
}
@@ -27,3 +28,10 @@ export const ALL_BLOCK_TYPES = Object.values(BlockType) as string[]
export function isValidBlockType(type: string): type is BlockType {
return ALL_BLOCK_TYPES.includes(type)
}
/**
* Helper to check if a block type is a workflow block (current or deprecated)
*/
export function isWorkflowBlockType(blockType: string | undefined): boolean {
return blockType === BlockType.WORKFLOW || blockType === BlockType.WORKFLOW_INPUT
}
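
A quick illustration of the helper's behaviour:

isWorkflowBlockType(BlockType.WORKFLOW) // true  (deprecated id, 'workflow')
isWorkflowBlockType(BlockType.WORKFLOW_INPUT) // true  (current id, 'workflow_input')
isWorkflowBlockType(BlockType.STARTER) // false
isWorkflowBlockType(undefined) // false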

View File

@@ -886,7 +886,7 @@ describe('AgentBlockHandler', () => {
}
mockContext.stream = true
mockContext.selectedOutputIds = [mockBlock.id]
mockContext.selectedOutputs = [mockBlock.id]
const result = await handler.execute(mockBlock, inputs, mockContext)
@@ -955,7 +955,7 @@ describe('AgentBlockHandler', () => {
}
mockContext.stream = true
mockContext.selectedOutputIds = [mockBlock.id]
mockContext.selectedOutputs = [mockBlock.id]
const result = await handler.execute(mockBlock, inputs, mockContext)
@@ -1012,7 +1012,7 @@ describe('AgentBlockHandler', () => {
}
mockContext.stream = true
mockContext.selectedOutputIds = [mockBlock.id]
mockContext.selectedOutputs = [mockBlock.id]
const result = await handler.execute(mockBlock, inputs, mockContext)

View File

@@ -371,7 +371,7 @@ export class AgentBlockHandler implements BlockHandler {
private getStreamingConfig(block: SerializedBlock, context: ExecutionContext): StreamingConfig {
const isBlockSelectedForOutput =
context.selectedOutputIds?.some((outputId) => {
context.selectedOutputs?.some((outputId) => {
if (outputId === block.id) return true
const firstUnderscoreIndex = outputId.indexOf('_')
return (
@@ -382,10 +382,6 @@ export class AgentBlockHandler implements BlockHandler {
const hasOutgoingConnections = context.edges?.some((edge) => edge.source === block.id) ?? false
const shouldUseStreaming = Boolean(context.stream) && isBlockSelectedForOutput
if (shouldUseStreaming) {
logger.info(`Block ${block.id} will use streaming response`)
}
return { shouldUseStreaming, isBlockSelectedForOutput, hasOutgoingConnections }
}
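
The selection test above matches either the whole block id or entries of the form blockId_field. The exact original condition is partially outside this hunk, so treat the following standalone restatement as an approximation:

// Approximate sketch of the matching rule used by getStreamingConfig.
function isBlockSelectedForOutput(blockId: string, selectedOutputs: string[] = []): boolean {
  return selectedOutputs.some((outputId) => {
    if (outputId === blockId) return true
    const underscoreIndex = outputId.indexOf('_')
    return underscoreIndex !== -1 && outputId.slice(0, underscoreIndex) === blockId
  })
}

// isBlockSelectedForOutput('agent-1', ['agent-1_content']) -> true
// isBlockSelectedForOutput('agent-1', ['agent-2_content']) -> false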

View File

@@ -1,17 +1,29 @@
import { generateInternalToken } from '@/lib/auth/internal'
import { createLogger } from '@/lib/logs/console/logger'
import { buildTraceSpans } from '@/lib/logs/execution/trace-spans/trace-spans'
import type { TraceSpan } from '@/lib/logs/types'
import { getBaseUrl } from '@/lib/urls/utils'
import type { BlockOutput } from '@/blocks/types'
import { Executor } from '@/executor'
import { BlockType } from '@/executor/consts'
import type { BlockHandler, ExecutionContext, StreamingExecution } from '@/executor/types'
import type {
BlockHandler,
ExecutionContext,
ExecutionResult,
StreamingExecution,
} from '@/executor/types'
import { Serializer } from '@/serializer'
import type { SerializedBlock } from '@/serializer/types'
import { useWorkflowRegistry } from '@/stores/workflows/registry/store'
const logger = createLogger('WorkflowBlockHandler')
type WorkflowTraceSpan = TraceSpan & {
metadata?: Record<string, unknown>
children?: WorkflowTraceSpan[]
output?: (Record<string, unknown> & { childTraceSpans?: WorkflowTraceSpan[] }) | null
}
// Maximum allowed depth for nested workflow executions
const MAX_WORKFLOW_DEPTH = 10
@@ -125,13 +137,19 @@ export class WorkflowBlockHandler implements BlockHandler {
// Use the actual child workflow ID for authentication, not the execution ID
// This ensures knowledge base and other API calls can properly authenticate
const result = await subExecutor.execute(workflowId)
const executionResult = this.toExecutionResult(result)
const duration = performance.now() - startTime
logger.info(`Child workflow ${childWorkflowName} completed in ${Math.round(duration)}ms`)
const childTraceSpans = this.captureChildWorkflowLogs(result, childWorkflowName, context)
const childTraceSpans = this.captureChildWorkflowLogs(
executionResult,
childWorkflowName,
context
)
const mappedResult = this.mapChildOutputToParent(
result,
executionResult,
workflowId,
childWorkflowName,
duration,
@@ -146,6 +164,7 @@ export class WorkflowBlockHandler implements BlockHandler {
// Attach trace spans and name for higher-level logging to consume
errorWithSpans.childTraceSpans = childTraceSpans
errorWithSpans.childWorkflowName = childWorkflowName
errorWithSpans.executionResult = executionResult
throw errorWithSpans
}
@@ -162,7 +181,19 @@ export class WorkflowBlockHandler implements BlockHandler {
throw error // Re-throw as-is to avoid duplication
}
throw new Error(`Error in child workflow "${childWorkflowName}": ${originalError}`)
const wrappedError = new Error(
`Error in child workflow "${childWorkflowName}": ${originalError}`
) as any
if (error.childTraceSpans) {
wrappedError.childTraceSpans = error.childTraceSpans
}
if (error.childWorkflowName) {
wrappedError.childWorkflowName = error.childWorkflowName
}
if (error.executionResult) {
wrappedError.executionResult = error.executionResult
}
throw wrappedError
}
}
@@ -318,10 +349,10 @@ export class WorkflowBlockHandler implements BlockHandler {
* Captures and transforms child workflow logs into trace spans
*/
private captureChildWorkflowLogs(
childResult: any,
childResult: ExecutionResult,
childWorkflowName: string,
parentContext: ExecutionContext
): any[] {
): WorkflowTraceSpan[] {
try {
if (!childResult.logs || !Array.isArray(childResult.logs)) {
return []
@@ -333,9 +364,15 @@ export class WorkflowBlockHandler implements BlockHandler {
return []
}
const transformedSpans = traceSpans.map((span: any) => {
return this.transformSpanForChildWorkflow(span, childWorkflowName)
})
const processedSpans = this.processChildWorkflowSpans(traceSpans)
if (processedSpans.length === 0) {
return []
}
const transformedSpans = processedSpans.map((span) =>
this.transformSpanForChildWorkflow(span, childWorkflowName)
)
return transformedSpans
} catch (error) {
@@ -347,67 +384,111 @@ export class WorkflowBlockHandler implements BlockHandler {
/**
* Transforms trace span for child workflow context
*/
private transformSpanForChildWorkflow(span: any, childWorkflowName: string): any {
const transformedSpan = {
private transformSpanForChildWorkflow(
span: WorkflowTraceSpan,
childWorkflowName: string
): WorkflowTraceSpan {
const metadata: Record<string, unknown> = {
...(span.metadata ?? {}),
isFromChildWorkflow: true,
childWorkflowName,
}
const transformedChildren = Array.isArray(span.children)
? span.children.map((childSpan) =>
this.transformSpanForChildWorkflow(childSpan, childWorkflowName)
)
: undefined
return {
...span,
name: this.cleanChildSpanName(span.name, childWorkflowName),
metadata: {
...span.metadata,
isFromChildWorkflow: true,
childWorkflowName,
},
metadata,
...(transformedChildren ? { children: transformedChildren } : {}),
}
if (span.children && Array.isArray(span.children)) {
transformedSpan.children = span.children.map((childSpan: any) =>
this.transformSpanForChildWorkflow(childSpan, childWorkflowName)
)
}
if (span.output?.childTraceSpans) {
transformedSpan.output = {
...transformedSpan.output,
childTraceSpans: span.output.childTraceSpans,
}
}
return transformedSpan
}
/**
* Cleans up child span names for readability
*/
private cleanChildSpanName(spanName: string, childWorkflowName: string): string {
if (spanName.includes(`${childWorkflowName}:`)) {
const cleanName = spanName.replace(`${childWorkflowName}:`, '').trim()
private processChildWorkflowSpans(spans: TraceSpan[]): WorkflowTraceSpan[] {
const processed: WorkflowTraceSpan[] = []
if (cleanName === 'Workflow Execution') {
return `${childWorkflowName} workflow`
spans.forEach((span) => {
if (this.isSyntheticWorkflowWrapper(span)) {
if (span.children && Array.isArray(span.children)) {
processed.push(...this.processChildWorkflowSpans(span.children))
}
return
}
if (cleanName.startsWith('Agent ')) {
return `${cleanName}`
const workflowSpan: WorkflowTraceSpan = {
...span,
}
return `${cleanName}`
}
if (Array.isArray(workflowSpan.children)) {
workflowSpan.children = this.processChildWorkflowSpans(workflowSpan.children as TraceSpan[])
}
if (spanName === 'Workflow Execution') {
return `${childWorkflowName} workflow`
}
processed.push(workflowSpan)
})
return `${spanName}`
return processed
}
private flattenChildWorkflowSpans(spans: TraceSpan[]): WorkflowTraceSpan[] {
const flattened: WorkflowTraceSpan[] = []
spans.forEach((span) => {
if (this.isSyntheticWorkflowWrapper(span)) {
if (span.children && Array.isArray(span.children)) {
flattened.push(...this.flattenChildWorkflowSpans(span.children))
}
return
}
const workflowSpan: WorkflowTraceSpan = {
...span,
}
if (Array.isArray(workflowSpan.children)) {
const childSpans = workflowSpan.children as TraceSpan[]
workflowSpan.children = this.flattenChildWorkflowSpans(childSpans)
}
if (workflowSpan.output && typeof workflowSpan.output === 'object') {
const { childTraceSpans: nestedChildSpans, ...outputRest } = workflowSpan.output as {
childTraceSpans?: TraceSpan[]
} & Record<string, unknown>
if (Array.isArray(nestedChildSpans) && nestedChildSpans.length > 0) {
const flattenedNestedChildren = this.flattenChildWorkflowSpans(nestedChildSpans)
workflowSpan.children = [...(workflowSpan.children || []), ...flattenedNestedChildren]
}
workflowSpan.output = outputRest
}
flattened.push(workflowSpan)
})
return flattened
}
private toExecutionResult(result: ExecutionResult | StreamingExecution): ExecutionResult {
return 'execution' in result ? result.execution : result
}
private isSyntheticWorkflowWrapper(span: TraceSpan | undefined): boolean {
if (!span || span.type !== 'workflow') return false
return !span.blockId
}
/**
* Maps child workflow output to parent block output
*/
private mapChildOutputToParent(
childResult: any,
childResult: ExecutionResult,
childWorkflowId: string,
childWorkflowName: string,
duration: number,
childTraceSpans?: any[]
childTraceSpans?: WorkflowTraceSpan[]
): BlockOutput {
const success = childResult.success !== false
if (!success) {

View File

@@ -101,7 +101,7 @@ describe('Executor', () => {
workflow,
contextExtensions: {
stream: true,
selectedOutputIds: ['block1'],
selectedOutputs: ['block1'],
edges: [{ source: 'starter', target: 'block1' }],
onStream: mockOnStream,
},
@@ -302,7 +302,7 @@ describe('Executor', () => {
workflow,
contextExtensions: {
stream: true,
selectedOutputIds: ['block1'],
selectedOutputs: ['block1'],
onStream: mockOnStream,
},
})
@@ -322,14 +322,14 @@ describe('Executor', () => {
it.concurrent('should pass context extensions to execution context', async () => {
const workflow = createMinimalWorkflow()
const mockOnStream = vi.fn()
const selectedOutputIds = ['block1', 'block2']
const selectedOutputs = ['block1', 'block2']
const edges = [{ source: 'starter', target: 'block1' }]
const executor = new Executor({
workflow,
contextExtensions: {
stream: true,
selectedOutputIds,
selectedOutputs,
edges,
onStream: mockOnStream,
},
@@ -618,7 +618,7 @@ describe('Executor', () => {
workflow,
contextExtensions: {
stream: true,
selectedOutputIds: ['block1'],
selectedOutputs: ['block1'],
onStream: mockOnStream,
},
})
@@ -639,7 +639,7 @@ describe('Executor', () => {
workflow,
contextExtensions: {
stream: true,
selectedOutputIds: ['block1'],
selectedOutputs: ['block1'],
onStream: mockOnStream,
},
})

View File

@@ -3,7 +3,7 @@ import { createLogger } from '@/lib/logs/console/logger'
import type { TraceSpan } from '@/lib/logs/types'
import { getBlock } from '@/blocks'
import type { BlockOutput } from '@/blocks/types'
import { BlockType } from '@/executor/consts'
import { BlockType, isWorkflowBlockType } from '@/executor/consts'
import {
AgentBlockHandler,
ApiBlockHandler,
@@ -85,6 +85,53 @@ export class Executor {
private isCancelled = false
private isChildExecution = false
/**
* Updates block output with streamed content, handling both structured and unstructured responses
*/
private updateBlockOutputWithStreamedContent(
blockId: string,
fullContent: string,
blockState: any,
context: ExecutionContext
): void {
if (!blockState?.output) return
// Check if we have response format - if so, preserve structured response
let responseFormat: any
if (this.initialBlockStates?.[blockId]) {
const initialBlockState = this.initialBlockStates[blockId] as any
responseFormat = initialBlockState.responseFormat
}
if (responseFormat && fullContent) {
// For structured responses, parse the raw streaming content
try {
const parsedContent = JSON.parse(fullContent)
// Preserve metadata but spread parsed fields at root level
const structuredOutput = {
...parsedContent,
tokens: blockState.output.tokens,
toolCalls: blockState.output.toolCalls,
providerTiming: blockState.output.providerTiming,
cost: blockState.output.cost,
}
blockState.output = structuredOutput
// Also update the corresponding block log
const blockLog = context.blockLogs.find((log) => log.blockId === blockId)
if (blockLog) {
blockLog.output = structuredOutput
}
} catch (parseError) {
// If parsing fails, fall back to setting content
blockState.output.content = fullContent
}
} else {
// No response format, use standard content setting
blockState.output.content = fullContent
}
}
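
A worked example of the structured branch above, with hypothetical values:

// Hypothetical values: a responseFormat block whose raw streamed text is JSON.
const fullContent = '{"answer":"42","confidence":0.9}'
const previousOutput = { content: '', tokens: { total: 120 }, cost: { total: 0.002 } }

// Parsed fields are spread at the root while token/cost metadata is preserved:
const structuredOutput = {
  ...JSON.parse(fullContent),
  tokens: previousOutput.tokens,
  cost: previousOutput.cost,
}
// -> { answer: '42', confidence: 0.9, tokens: { total: 120 }, cost: { total: 0.002 } }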
constructor(
private workflowParam:
| SerializedWorkflow
@@ -96,9 +143,10 @@ export class Executor {
workflowVariables?: Record<string, any>
contextExtensions?: {
stream?: boolean
selectedOutputIds?: string[]
selectedOutputs?: string[]
edges?: Array<{ source: string; target: string }>
onStream?: (streamingExecution: StreamingExecution) => Promise<void>
onBlockComplete?: (blockId: string, output: any) => Promise<void>
executionId?: string
workspaceId?: string
isChildExecution?: boolean
@@ -284,7 +332,7 @@ export class Executor {
const processedClientStream = streamingResponseFormatProcessor.processStream(
streamForClient,
blockId,
context.selectedOutputIds || [],
context.selectedOutputs || [],
responseFormat
)
@@ -312,83 +360,24 @@ export class Executor {
const blockId = (streamingExec.execution as any).blockId
const blockState = context.blockStates.get(blockId)
if (blockState?.output) {
// Check if we have response format - if so, preserve structured response
let responseFormat: any
if (this.initialBlockStates?.[blockId]) {
const initialBlockState = this.initialBlockStates[blockId] as any
responseFormat = initialBlockState.responseFormat
}
if (responseFormat && fullContent) {
// For structured responses, always try to parse the raw streaming content
// The streamForExecutor contains the raw JSON response, not the processed display text
try {
const parsedContent = JSON.parse(fullContent)
// Preserve metadata but spread parsed fields at root level (same as manual execution)
const structuredOutput = {
...parsedContent,
tokens: blockState.output.tokens,
toolCalls: blockState.output.toolCalls,
providerTiming: blockState.output.providerTiming,
cost: blockState.output.cost,
}
blockState.output = structuredOutput
// Also update the corresponding block log with the structured output
const blockLog = context.blockLogs.find((log) => log.blockId === blockId)
if (blockLog) {
blockLog.output = structuredOutput
}
} catch (parseError) {
// If parsing fails, fall back to setting content
blockState.output.content = fullContent
}
} else {
// No response format, use standard content setting
blockState.output.content = fullContent
}
}
this.updateBlockOutputWithStreamedContent(
blockId,
fullContent,
blockState,
context
)
} catch (readerError: any) {
logger.error('Error reading stream for executor:', readerError)
// Set partial content if available
const blockId = (streamingExec.execution as any).blockId
const blockState = context.blockStates.get(blockId)
if (blockState?.output && fullContent) {
// Check if we have response format for error handling too
let responseFormat: any
if (this.initialBlockStates?.[blockId]) {
const initialBlockState = this.initialBlockStates[blockId] as any
responseFormat = initialBlockState.responseFormat
}
if (responseFormat) {
// For structured responses, always try to parse the raw streaming content
// The streamForExecutor contains the raw JSON response, not the processed display text
try {
const parsedContent = JSON.parse(fullContent)
const structuredOutput = {
...parsedContent,
tokens: blockState.output.tokens,
toolCalls: blockState.output.toolCalls,
providerTiming: blockState.output.providerTiming,
cost: blockState.output.cost,
}
blockState.output = structuredOutput
// Also update the corresponding block log with the structured output
const blockLog = context.blockLogs.find((log) => log.blockId === blockId)
if (blockLog) {
blockLog.output = structuredOutput
}
} catch (parseError) {
// If parsing fails, fall back to setting content
blockState.output.content = fullContent
}
} else {
// No response format, use standard content setting
blockState.output.content = fullContent
}
if (fullContent) {
this.updateBlockOutputWithStreamedContent(
blockId,
fullContent,
blockState,
context
)
}
} finally {
try {
@@ -751,9 +740,10 @@ export class Executor {
workflow: this.actualWorkflow,
// Add streaming context from contextExtensions
stream: this.contextExtensions.stream || false,
selectedOutputIds: this.contextExtensions.selectedOutputIds || [],
selectedOutputs: this.contextExtensions.selectedOutputs || [],
edges: this.contextExtensions.edges || [],
onStream: this.contextExtensions.onStream,
onBlockComplete: this.contextExtensions.onBlockComplete,
}
Object.entries(this.initialBlockStates).forEach(([blockId, output]) => {
@@ -2145,6 +2135,14 @@ export class Executor {
success: true,
})
if (context.onBlockComplete && !isNonStreamTriggerBlock) {
try {
await context.onBlockComplete(blockId, output)
} catch (callbackError: any) {
logger.error('Error in onBlockComplete callback:', callbackError)
}
}
return output
} catch (error: any) {
// Remove this block from active blocks if there's an error
@@ -2182,7 +2180,7 @@ export class Executor {
new Date(blockLog.endedAt).getTime() - new Date(blockLog.startedAt).getTime()
// If this error came from a child workflow execution, persist its trace spans on the log
if (block.metadata?.id === BlockType.WORKFLOW) {
if (isWorkflowBlockType(block.metadata?.id)) {
this.attachChildWorkflowSpansToLog(blockLog, error)
}
@@ -2272,7 +2270,7 @@ export class Executor {
}
// Preserve child workflow spans on the block state so downstream logging can render them
if (block.metadata?.id === BlockType.WORKFLOW) {
if (isWorkflowBlockType(block.metadata?.id)) {
this.attachChildWorkflowSpansToOutput(errorOutput, error)
}
@@ -2283,7 +2281,42 @@ export class Executor {
executionTime: blockLog.durationMs,
})
// If there are error paths to follow, return error output instead of throwing
const failureEndTime = context.metadata.endTime ?? new Date().toISOString()
if (!context.metadata.endTime) {
context.metadata.endTime = failureEndTime
}
const failureDuration = context.metadata.startTime
? Math.max(
0,
new Date(failureEndTime).getTime() - new Date(context.metadata.startTime).getTime()
)
: (context.metadata.duration ?? 0)
context.metadata.duration = failureDuration
const failureMetadata = {
...context.metadata,
endTime: failureEndTime,
duration: failureDuration,
workflowConnections: this.actualWorkflow.connections.map((conn) => ({
source: conn.source,
target: conn.target,
})),
}
const upstreamExecutionResult = (error as { executionResult?: ExecutionResult } | null)
?.executionResult
const executionResultPayload: ExecutionResult = {
success: false,
output: upstreamExecutionResult?.output ?? errorOutput,
error: upstreamExecutionResult?.error ?? this.extractErrorMessage(error),
logs: [...context.blockLogs],
metadata: {
...failureMetadata,
...(upstreamExecutionResult?.metadata ?? {}),
workflowConnections: failureMetadata.workflowConnections,
},
}
if (hasErrorPath) {
// Return the error output to allow execution to continue along error path
return errorOutput
@@ -2316,7 +2349,17 @@ export class Executor {
errorMessage: this.extractErrorMessage(error),
})
throw new Error(errorMessage)
const executionError = new Error(errorMessage)
;(executionError as any).executionResult = executionResultPayload
if (Array.isArray((error as { childTraceSpans?: TraceSpan[] } | null)?.childTraceSpans)) {
;(executionError as any).childTraceSpans = (
error as { childTraceSpans?: TraceSpan[] }
).childTraceSpans
;(executionError as any).childWorkflowName = (
error as { childWorkflowName?: string }
).childWorkflowName
}
throw executionError
}
}
@@ -2329,11 +2372,12 @@ export class Executor {
error as { childTraceSpans?: TraceSpan[]; childWorkflowName?: string } | null | undefined
)?.childTraceSpans
if (Array.isArray(spans) && spans.length > 0) {
const childWorkflowName = (error as { childWorkflowName?: string } | null | undefined)
?.childWorkflowName
blockLog.output = {
...(blockLog.output || {}),
childTraceSpans: spans,
childWorkflowName: (error as { childWorkflowName?: string } | null | undefined)
?.childWorkflowName,
childWorkflowName,
}
}
}
@@ -2516,7 +2560,7 @@ export class Executor {
* Preserves child workflow trace spans for proper nesting
*/
private integrateChildWorkflowLogs(block: SerializedBlock, output: NormalizedBlockOutput): void {
if (block.metadata?.id !== BlockType.WORKFLOW) {
if (!isWorkflowBlockType(block.metadata?.id)) {
return
}

View File

@@ -169,11 +169,12 @@ export interface ExecutionContext {
// Streaming support and output selection
stream?: boolean // Whether to use streaming responses when available
selectedOutputIds?: string[] // IDs of blocks selected for streaming output
selectedOutputs?: string[] // IDs of blocks selected for streaming output
edges?: Array<{ source: string; target: string }> // Workflow edge connections
// New context extensions
onStream?: (streamingExecution: StreamingExecution) => Promise<string>
onBlockComplete?: (blockId: string, output: any) => Promise<void>
}
/**
@@ -295,7 +296,7 @@ export interface ResponseFormatStreamProcessor {
processStream(
originalStream: ReadableStream,
blockId: string,
selectedOutputIds: string[],
selectedOutputs: string[],
responseFormat?: any
): ReadableStream
}
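
Editor's note: to make the new streaming fields above concrete, here is a hedged sketch of an object a caller might pass for these context extensions. The exact Executor constructor options and the full `StreamingExecution` shape are not shown in this diff, so treat every name below as illustrative only.

```ts
// Illustrative only: values for the streaming fields added to ExecutionContext above.
// `selectedOutputs` entries follow the `<blockId>_<field>` / `<blockId>.<path>`
// convention used by the response-format helpers later in this diff.
const streamingExtensions = {
  stream: true,
  selectedOutputs: ['agent-1_content'],
  edges: [{ source: 'starter', target: 'agent-1' }],
  // Drain a block's streaming output and return the accumulated text,
  // matching onStream's Promise<string> return type.
  onStream: async (streamingExecution: { stream: ReadableStream<Uint8Array> }) => {
    const reader = streamingExecution.stream.getReader()
    const decoder = new TextDecoder()
    let text = ''
    while (true) {
      const { done, value } = await reader.read()
      if (done) break
      text += decoder.decode(value, { stream: true })
    }
    return text
  },
  // Invoked as each non-streaming block finishes, so callers can forward
  // completed block outputs incrementally.
  onBlockComplete: async (blockId: string, output: any) => {
    console.log(`block ${blockId} finished`, output)
  },
}
```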

View File

@@ -11,11 +11,11 @@ export class StreamingResponseFormatProcessor implements ResponseFormatStreamPro
processStream(
originalStream: ReadableStream,
blockId: string,
selectedOutputIds: string[],
selectedOutputs: string[],
responseFormat?: any
): ReadableStream {
// Check if this block has response format selected outputs
const hasResponseFormatSelection = selectedOutputIds.some((outputId) => {
const hasResponseFormatSelection = selectedOutputs.some((outputId) => {
const blockIdForOutput = outputId.includes('_')
? outputId.split('_')[0]
: outputId.split('.')[0]
@@ -28,7 +28,7 @@ export class StreamingResponseFormatProcessor implements ResponseFormatStreamPro
}
// Get the selected field names for this block
const selectedFields = selectedOutputIds
const selectedFields = selectedOutputs
.filter((outputId) => {
const blockIdForOutput = outputId.includes('_')
? outputId.split('_')[0]

View File

@@ -868,13 +868,19 @@ export function useCollaborativeWorkflow() {
)
const collaborativeUpdateBlockPosition = useCallback(
(id: string, position: Position) => {
// Only apply position updates here (no undo recording to avoid micro-moves)
executeQueuedDebouncedOperation('update-position', 'block', { id, position }, () =>
(id: string, position: Position, commit = true) => {
if (commit) {
executeQueuedOperation('update-position', 'block', { id, position, commit }, () => {
workflowStore.updateBlockPosition(id, position)
})
return
}
executeQueuedDebouncedOperation('update-position', 'block', { id, position }, () => {
workflowStore.updateBlockPosition(id, position)
)
})
},
[executeQueuedDebouncedOperation, workflowStore]
[executeQueuedDebouncedOperation, executeQueuedOperation, workflowStore]
)
const collaborativeUpdateBlockName = useCallback(

View File

@@ -429,7 +429,15 @@ export function useUndoRedo() {
blockSnapshot.position,
blockSnapshot.data,
blockSnapshot.data?.parentId,
blockSnapshot.data?.extent
blockSnapshot.data?.extent,
{
enabled: blockSnapshot.enabled,
horizontalHandles: blockSnapshot.horizontalHandles,
isWide: blockSnapshot.isWide,
advancedMode: blockSnapshot.advancedMode,
triggerMode: blockSnapshot.triggerMode,
height: blockSnapshot.height,
}
)
// Set subblock values for the main block locally
@@ -471,7 +479,15 @@ export function useUndoRedo() {
snap.position,
snap.data,
snap.data?.parentId,
snap.data?.extent
snap.data?.extent,
{
enabled: snap.enabled,
horizontalHandles: snap.horizontalHandles,
isWide: snap.isWide,
advancedMode: snap.advancedMode,
triggerMode: snap.triggerMode,
height: snap.height,
}
)
// Send to server with subBlocks included in payload
@@ -587,6 +603,7 @@ export function useUndoRedo() {
id: moveOp.data.blockId,
position: { x: moveOp.data.after.x, y: moveOp.data.after.y },
parentId: moveOp.data.after.parentId,
commit: true,
isUndo: true,
originalOpId: entry.id,
},
@@ -690,6 +707,7 @@ export function useUndoRedo() {
payload: {
id: blockId,
position: newPosition,
commit: true,
isUndo: true,
originalOpId: entry.id,
},
@@ -801,7 +819,15 @@ export function useUndoRedo() {
snap.position,
snap.data,
snap.data?.parentId,
snap.data?.extent
snap.data?.extent,
{
enabled: snap.enabled,
horizontalHandles: snap.horizontalHandles,
isWide: snap.isWide,
advancedMode: snap.advancedMode,
triggerMode: snap.triggerMode,
height: snap.height,
}
)
// Set subblock values for the main block locally
@@ -841,7 +867,15 @@ export function useUndoRedo() {
snapNested.position,
snapNested.data,
snapNested.data?.parentId,
snapNested.data?.extent
snapNested.data?.extent,
{
enabled: snapNested.enabled,
horizontalHandles: snapNested.horizontalHandles,
isWide: snapNested.isWide,
advancedMode: snapNested.advancedMode,
triggerMode: snapNested.triggerMode,
height: snapNested.height,
}
)
// Send to server with subBlocks included
@@ -1058,7 +1092,15 @@ export function useUndoRedo() {
duplicatedBlockSnapshot.position,
duplicatedBlockSnapshot.data,
duplicatedBlockSnapshot.data?.parentId,
duplicatedBlockSnapshot.data?.extent
duplicatedBlockSnapshot.data?.extent,
{
enabled: duplicatedBlockSnapshot.enabled,
horizontalHandles: duplicatedBlockSnapshot.horizontalHandles,
isWide: duplicatedBlockSnapshot.isWide,
advancedMode: duplicatedBlockSnapshot.advancedMode,
triggerMode: duplicatedBlockSnapshot.triggerMode,
height: duplicatedBlockSnapshot.height,
}
)
// Restore subblock values

View File

@@ -125,6 +125,27 @@ export async function updateApiKeyLastUsed(keyId: string): Promise<void> {
}
}
/**
* Given a pinned API key ID, resolve the owning userId (actor).
* Returns null if not found.
*/
export async function getApiKeyOwnerUserId(
pinnedApiKeyId: string | null | undefined
): Promise<string | null> {
if (!pinnedApiKeyId) return null
try {
const rows = await db
.select({ userId: apiKeyTable.userId })
.from(apiKeyTable)
.where(eq(apiKeyTable.id, pinnedApiKeyId))
.limit(1)
return rows[0]?.userId ?? null
} catch (error) {
logger.error('Error resolving API key owner', { error, pinnedApiKeyId })
return null
}
}
/**
* Get the API encryption key from the environment
* @returns The API encryption key

View File

@@ -57,10 +57,7 @@ export async function verifyInternalToken(token: string): Promise<boolean> {
export function verifyCronAuth(request: NextRequest, context?: string): NextResponse | null {
const authHeader = request.headers.get('authorization')
const expectedAuth = `Bearer ${env.CRON_SECRET}`
const isVercelCron = request.headers.get('x-vercel-cron') === '1'
// Allow Vercel Cron requests (they include x-vercel-cron header instead of Authorization)
if (!isVercelCron && authHeader !== expectedAuth) {
if (authHeader !== expectedAuth) {
const contextInfo = context ? ` for ${context}` : ''
logger.warn(`Unauthorized CRON access attempt${contextInfo}`, {
providedAuth: authHeader,

View File

@@ -1,7 +1,6 @@
import type { Metadata } from 'next'
import { getBrandConfig } from '@/lib/branding/branding'
import { env } from '@/lib/env'
import { getAssetUrl } from '@/lib/utils'
/**
* Generate dynamic metadata based on brand configuration
@@ -70,7 +69,7 @@ export function generateBrandedMetadata(override: Partial<Metadata> = {}): Metad
siteName: brand.name,
images: [
{
url: brand.logoUrl || getAssetUrl('social/facebook.png'),
url: brand.logoUrl || '/social/facebook.png',
width: 1200,
height: 630,
alt: brand.name,
@@ -81,7 +80,7 @@ export function generateBrandedMetadata(override: Partial<Metadata> = {}): Metad
card: 'summary_large_image',
title: defaultTitle,
description: summaryFull,
images: [brand.logoUrl || getAssetUrl('social/twitter.png')],
images: [brand.logoUrl || '/social/twitter.png'],
creator: '@simstudioai',
site: '@simstudioai',
},

View File

@@ -244,9 +244,6 @@ export const env = createEnv({
// Client-side Services
NEXT_PUBLIC_SOCKET_URL: z.string().url().optional(), // WebSocket server URL for real-time features
// Asset Storage
NEXT_PUBLIC_BLOB_BASE_URL: z.string().url().optional(), // Base URL for Vercel Blob storage (CDN assets)
// Billing
NEXT_PUBLIC_BILLING_ENABLED: z.boolean().optional(), // Enable billing enforcement and usage tracking (client-side)
@@ -294,7 +291,6 @@ export const env = createEnv({
experimental__runtimeEnv: {
NEXT_PUBLIC_APP_URL: process.env.NEXT_PUBLIC_APP_URL,
NEXT_PUBLIC_BLOB_BASE_URL: process.env.NEXT_PUBLIC_BLOB_BASE_URL,
NEXT_PUBLIC_BILLING_ENABLED: process.env.NEXT_PUBLIC_BILLING_ENABLED,
NEXT_PUBLIC_GOOGLE_CLIENT_ID: process.env.NEXT_PUBLIC_GOOGLE_CLIENT_ID,
NEXT_PUBLIC_GOOGLE_API_KEY: process.env.NEXT_PUBLIC_GOOGLE_API_KEY,

View File

@@ -135,6 +135,7 @@ export async function updateKnowledgeBase(
updates: {
name?: string
description?: string
workspaceId?: string | null
chunkingConfig?: {
maxSize: number
minSize: number
@@ -148,6 +149,7 @@ export async function updateKnowledgeBase(
updatedAt: Date
name?: string
description?: string | null
workspaceId?: string | null
chunkingConfig?: {
maxSize: number
minSize: number
@@ -161,6 +163,7 @@ export async function updateKnowledgeBase(
if (updates.name !== undefined) updateData.name = updates.name
if (updates.description !== undefined) updateData.description = updates.description
if (updates.workspaceId !== undefined) updateData.workspaceId = updates.workspaceId
if (updates.chunkingConfig !== undefined) {
updateData.chunkingConfig = updates.chunkingConfig
updateData.embeddingModel = 'text-embedding-3-small'

View File

@@ -37,6 +37,7 @@ export interface SessionErrorCompleteParams {
message?: string
stackTrace?: string
}
traceSpans?: TraceSpan[]
}
export class LoggingSession {
@@ -131,7 +132,7 @@ export class LoggingSession {
async completeWithError(params: SessionErrorCompleteParams = {}): Promise<void> {
try {
const { endedAt, totalDurationMs, error } = params
const { endedAt, totalDurationMs, error, traceSpans } = params
const endTime = endedAt ? new Date(endedAt) : new Date()
const durationMs = typeof totalDurationMs === 'number' ? totalDurationMs : 0
@@ -151,19 +152,19 @@ export class LoggingSession {
const message = error?.message || 'Execution failed before starting blocks'
const syntheticErrorSpan: TraceSpan[] = [
{
id: 'pre-execution-validation',
name: 'Workflow Error',
type: 'validation',
duration: Math.max(1, durationMs),
startTime: startTime.toISOString(),
endTime: endTime.toISOString(),
status: 'error',
children: [],
output: { error: message },
},
]
const hasProvidedSpans = Array.isArray(traceSpans) && traceSpans.length > 0
const errorSpan: TraceSpan = {
id: 'workflow-error-root',
name: 'Workflow Error',
type: 'workflow',
duration: Math.max(1, durationMs),
startTime: startTime.toISOString(),
endTime: endTime.toISOString(),
status: 'error',
...(hasProvidedSpans ? {} : { children: [] }),
output: { error: message },
}
await executionLogger.completeWorkflowExecution({
executionId: this.executionId,
@@ -171,7 +172,7 @@ export class LoggingSession {
totalDurationMs: Math.max(1, durationMs),
costSummary,
finalOutput: { error: message },
traceSpans: syntheticErrorSpan,
traceSpans: hasProvidedSpans ? traceSpans : [errorSpan],
})
if (this.requestId) {

View File

@@ -582,6 +582,195 @@ describe('buildTraceSpans', () => {
// Verify no toolCalls property exists (since we're using children instead)
expect(agentSpan.toolCalls).toBeUndefined()
})
test('should flatten nested child workflow trace spans recursively', () => {
const nestedChildSpan = {
id: 'nested-workflow-span',
name: 'Nested Workflow Block',
type: 'workflow',
blockId: 'nested-workflow-block-id',
duration: 3000,
startTime: '2024-01-01T10:00:01.000Z',
endTime: '2024-01-01T10:00:04.000Z',
status: 'success' as const,
output: {
childTraceSpans: [
{
id: 'grand-wrapper',
name: 'Workflow Execution',
type: 'workflow',
duration: 3000,
startTime: '2024-01-01T10:00:01.000Z',
endTime: '2024-01-01T10:00:04.000Z',
status: 'success' as const,
children: [
{
id: 'grand-child-block',
name: 'Deep API Call',
type: 'api',
duration: 1500,
startTime: '2024-01-01T10:00:01.500Z',
endTime: '2024-01-01T10:00:03.000Z',
status: 'success' as const,
input: { path: '/v1/test' },
output: { result: 'ok' },
},
],
},
],
},
}
const toolSpan = {
id: 'child-tool-span',
name: 'Helper Tool',
type: 'tool',
duration: 1000,
startTime: '2024-01-01T10:00:04.000Z',
endTime: '2024-01-01T10:00:05.000Z',
status: 'success' as const,
}
const mockExecutionResult: ExecutionResult = {
success: true,
output: { result: 'parent output' },
logs: [
{
blockId: 'workflow-1',
blockName: 'Child Workflow',
blockType: 'workflow',
startedAt: '2024-01-01T10:00:00.000Z',
endedAt: '2024-01-01T10:00:05.000Z',
durationMs: 5000,
success: true,
output: {
childWorkflowName: 'Child Workflow',
childTraceSpans: [
{
id: 'child-wrapper',
name: 'Workflow Execution',
type: 'workflow',
duration: 5000,
startTime: '2024-01-01T10:00:00.000Z',
endTime: '2024-01-01T10:00:05.000Z',
status: 'success' as const,
children: [nestedChildSpan, toolSpan],
},
],
},
},
],
}
const { traceSpans } = buildTraceSpans(mockExecutionResult)
expect(traceSpans).toHaveLength(1)
const workflowSpan = traceSpans[0]
expect(workflowSpan.type).toBe('workflow')
expect(workflowSpan.children).toBeDefined()
expect(workflowSpan.children).toHaveLength(2)
const nestedWorkflowSpan = workflowSpan.children?.find((span) => span.type === 'workflow')
expect(nestedWorkflowSpan).toBeDefined()
expect(nestedWorkflowSpan?.name).toBe('Nested Workflow Block')
expect(nestedWorkflowSpan?.children).toBeDefined()
expect(nestedWorkflowSpan?.children).toHaveLength(1)
expect(nestedWorkflowSpan?.children?.[0].name).toBe('Deep API Call')
expect(nestedWorkflowSpan?.children?.[0].type).toBe('api')
const helperToolSpan = workflowSpan.children?.find((span) => span.id === 'child-tool-span')
expect(helperToolSpan?.type).toBe('tool')
const syntheticWrappers = workflowSpan.children?.filter(
(span) => span.name === 'Workflow Execution'
)
expect(syntheticWrappers).toHaveLength(0)
})
test('should handle nested child workflow errors with proper hierarchy', () => {
const functionErrorSpan = {
id: 'function-error-span',
name: 'Function 1',
type: 'function',
duration: 200,
startTime: '2024-01-01T10:01:02.000Z',
endTime: '2024-01-01T10:01:02.200Z',
status: 'error' as const,
blockId: 'function-1',
output: {
error: 'Syntax Error: Line 1: `retur "HELLO"` - Unexpected string',
},
}
const rainbowCupcakeSpan = {
id: 'rainbow-workflow-span',
name: 'Rainbow Cupcake',
type: 'workflow',
duration: 300,
startTime: '2024-01-01T10:01:02.000Z',
endTime: '2024-01-01T10:01:02.300Z',
status: 'error' as const,
blockId: 'workflow-rainbow',
output: {
childWorkflowName: 'rainbow-cupcake',
error: 'Syntax Error: Line 1: `retur "HELLO"` - Unexpected string',
childTraceSpans: [functionErrorSpan],
},
}
const mockExecutionResult: ExecutionResult = {
success: false,
output: { result: null },
metadata: {
duration: 3000,
startTime: '2024-01-01T10:01:00.000Z',
},
logs: [
{
blockId: 'workflow-silk',
blockName: 'Silk Pond',
blockType: 'workflow',
startedAt: '2024-01-01T10:01:00.000Z',
endedAt: '2024-01-01T10:01:03.000Z',
durationMs: 3000,
success: false,
error:
'Error in child workflow "silk-pond": Error in child workflow "rainbow-cupcake": Syntax Error',
output: {
childWorkflowName: 'silk-pond',
childTraceSpans: [rainbowCupcakeSpan],
},
},
],
}
const { traceSpans } = buildTraceSpans(mockExecutionResult)
expect(traceSpans).toHaveLength(1)
const workflowExecutionSpan = traceSpans[0]
expect(workflowExecutionSpan.name).toBe('Workflow Execution')
expect(workflowExecutionSpan.status).toBe('error')
expect(workflowExecutionSpan.children).toBeDefined()
expect(workflowExecutionSpan.children).toHaveLength(1)
const silkPondSpan = workflowExecutionSpan.children?.[0]
expect(silkPondSpan?.name).toBe('Silk Pond')
expect(silkPondSpan?.status).toBe('error')
expect(silkPondSpan?.children).toBeDefined()
expect(silkPondSpan?.children).toHaveLength(1)
const rainbowSpan = silkPondSpan?.children?.[0]
expect(rainbowSpan?.name).toBe('Rainbow Cupcake')
expect(rainbowSpan?.status).toBe('error')
expect(rainbowSpan?.type).toBe('workflow')
expect(rainbowSpan?.children).toBeDefined()
expect(rainbowSpan?.children).toHaveLength(1)
const functionSpan = rainbowSpan?.children?.[0]
expect(functionSpan?.name).toBe('Function 1')
expect(functionSpan?.status).toBe('error')
expect((functionSpan?.output as { error?: string })?.error).toContain('Syntax Error')
})
})
describe('stripCustomToolPrefix', () => {

View File

@@ -1,9 +1,63 @@
import { createLogger } from '@/lib/logs/console/logger'
import type { TraceSpan } from '@/lib/logs/types'
import { isWorkflowBlockType } from '@/executor/consts'
import type { ExecutionResult } from '@/executor/types'
const logger = createLogger('TraceSpans')
function isSyntheticWorkflowWrapper(span: TraceSpan | undefined): boolean {
if (!span || span.type !== 'workflow') return false
return !span.blockId
}
function flattenWorkflowChildren(spans: TraceSpan[]): TraceSpan[] {
const flattened: TraceSpan[] = []
spans.forEach((span) => {
if (isSyntheticWorkflowWrapper(span)) {
if (span.children && Array.isArray(span.children)) {
flattened.push(...flattenWorkflowChildren(span.children))
}
return
}
const processedSpan = ensureNestedWorkflowsProcessed(span)
flattened.push(processedSpan)
})
return flattened
}
function getTraceSpanKey(span: TraceSpan): string {
if (span.id) {
return span.id
}
const name = span.name || 'span'
const start = span.startTime || 'unknown-start'
const end = span.endTime || 'unknown-end'
return `${name}|${start}|${end}`
}
function mergeTraceSpanChildren(...childGroups: TraceSpan[][]): TraceSpan[] {
const merged: TraceSpan[] = []
const seen = new Set<string>()
childGroups.forEach((group) => {
group.forEach((child) => {
const key = getTraceSpanKey(child)
if (seen.has(key)) {
return
}
seen.add(key)
merged.push(child)
})
})
return merged
}
// Helper function to build a tree of trace spans from execution logs
export function buildTraceSpans(result: ExecutionResult): {
traceSpans: TraceSpan[]
@@ -56,11 +110,8 @@ export function buildTraceSpans(result: ExecutionResult): {
}
}
// Prefer human-friendly workflow block naming if provided by child execution mapping
const displayName =
log.blockType === 'workflow' && log.output?.childWorkflowName
? `${log.output.childWorkflowName} workflow`
: log.blockName || log.blockId
// Use block name consistently for all block types
const displayName = log.blockName || log.blockId
const span: TraceSpan = {
id: spanId,
@@ -106,42 +157,11 @@ export function buildTraceSpans(result: ExecutionResult): {
;(span as any).model = log.output.model
}
// Handle child workflow spans for workflow blocks
if (
log.blockType === 'workflow' &&
log.output?.childTraceSpans &&
Array.isArray(log.output.childTraceSpans)
) {
// Convert child trace spans to be direct children of this workflow block span
const childTraceSpans = log.output.childTraceSpans as TraceSpan[]
// Process child workflow spans and add them as children
const flatChildSpans: TraceSpan[] = []
childTraceSpans.forEach((childSpan) => {
// Skip the synthetic workflow span wrapper - we only want the actual block executions
if (
childSpan.type === 'workflow' &&
(childSpan.name === 'Workflow Execution' || childSpan.name.endsWith(' workflow'))
) {
// Add its children directly, skipping the synthetic wrapper
if (childSpan.children && Array.isArray(childSpan.children)) {
flatChildSpans.push(...childSpan.children)
}
} else {
// This is a regular span, add it directly
// But first, ensure nested workflow blocks in this span are also processed
const processedSpan = ensureNestedWorkflowsProcessed(childSpan)
flatChildSpans.push(processedSpan)
}
})
// Add the child spans as children of this workflow block
span.children = flatChildSpans
}
// Enhanced approach: Use timeSegments for sequential flow if available
// This provides the actual model→tool→model execution sequence
// Skip for workflow blocks since they will be processed via output.childTraceSpans at the end
if (
!isWorkflowBlockType(log.blockType) &&
log.output?.providerTiming?.timeSegments &&
Array.isArray(log.output.providerTiming.timeSegments)
) {
@@ -250,6 +270,17 @@ export function buildTraceSpans(result: ExecutionResult): {
}
}
// Handle child workflow spans for workflow blocks - process at the end to avoid being overwritten
if (
isWorkflowBlockType(log.blockType) &&
log.output?.childTraceSpans &&
Array.isArray(log.output.childTraceSpans)
) {
const childTraceSpans = log.output.childTraceSpans as TraceSpan[]
const flattenedChildren = flattenWorkflowChildren(childTraceSpans)
span.children = mergeTraceSpanChildren(span.children || [], flattenedChildren)
}
// Store in map
spanMap.set(spanId, span)
})
@@ -327,7 +358,7 @@ export function buildTraceSpans(result: ExecutionResult): {
}
// Check if this span could be a parent to future spans
if (log.blockType === 'agent' || log.blockType === 'workflow') {
if (log.blockType === 'agent' || isWorkflowBlockType(log.blockType)) {
spanStack.push(span)
}
})
@@ -594,36 +625,41 @@ function groupIterationBlocks(spans: TraceSpan[]): TraceSpan[] {
}
function ensureNestedWorkflowsProcessed(span: TraceSpan): TraceSpan {
const processedSpan = { ...span }
const processedSpan: TraceSpan = { ...span }
if (
span.type === 'workflow' &&
span.output?.childTraceSpans &&
Array.isArray(span.output.childTraceSpans)
) {
const childTraceSpans = span.output.childTraceSpans as TraceSpan[]
const nestedChildren: TraceSpan[] = []
childTraceSpans.forEach((childSpan) => {
if (
childSpan.type === 'workflow' &&
(childSpan.name === 'Workflow Execution' || childSpan.name.endsWith(' workflow'))
) {
if (childSpan.children && Array.isArray(childSpan.children)) {
childSpan.children.forEach((grandchildSpan) => {
nestedChildren.push(ensureNestedWorkflowsProcessed(grandchildSpan))
})
}
} else {
nestedChildren.push(ensureNestedWorkflowsProcessed(childSpan))
}
})
processedSpan.children = nestedChildren
} else if (span.children && Array.isArray(span.children)) {
processedSpan.children = span.children.map((child) => ensureNestedWorkflowsProcessed(child))
if (processedSpan.output && typeof processedSpan.output === 'object') {
processedSpan.output = { ...processedSpan.output }
}
const normalizedChildren = Array.isArray(span.children)
? span.children.map((child) => ensureNestedWorkflowsProcessed(child))
: []
const outputChildSpans = (() => {
if (!processedSpan.output || typeof processedSpan.output !== 'object') {
return [] as TraceSpan[]
}
const maybeChildSpans = (processedSpan.output as { childTraceSpans?: TraceSpan[] })
.childTraceSpans
if (!Array.isArray(maybeChildSpans) || maybeChildSpans.length === 0) {
return [] as TraceSpan[]
}
return flattenWorkflowChildren(maybeChildSpans)
})()
const mergedChildren = mergeTraceSpanChildren(normalizedChildren, outputChildSpans)
if (processedSpan.output && 'childTraceSpans' in processedSpan.output) {
const { childTraceSpans, ...cleanOutput } = processedSpan.output as {
childTraceSpans?: TraceSpan[]
} & Record<string, unknown>
processedSpan.output = cleanOutput
}
processedSpan.children = mergedChildren.length > 0 ? mergedChildren : undefined
return processedSpan
}

View File

@@ -8,7 +8,6 @@ const logger = createLogger('Redis')
const redisUrl = env.REDIS_URL
// Global Redis client for connection pooling
// This is important for serverless environments like Vercel
let globalRedisClient: Redis | null = null
// Fallback in-memory cache for when Redis is not available
@@ -18,7 +17,6 @@ const MAX_CACHE_SIZE = 1000
/**
* Get a Redis client instance
* Uses connection pooling to avoid creating a new connection for each request
* This is critical for performance in serverless environments like Vercel
*/
export function getRedisClient(): Redis | null {
// For server-side only

View File

@@ -75,12 +75,12 @@ export function parseResponseFormatSafely(responseFormatValue: any, blockId: str
*/
export function extractFieldValues(
parsedContent: any,
selectedOutputIds: string[],
selectedOutputs: string[],
blockId: string
): Record<string, any> {
const extractedValues: Record<string, any> = {}
for (const outputId of selectedOutputIds) {
for (const outputId of selectedOutputs) {
const blockIdForOutput = extractBlockIdFromOutputId(outputId)
if (blockIdForOutput !== blockId) {
@@ -90,18 +90,7 @@ export function extractFieldValues(
const path = extractPathFromOutputId(outputId, blockIdForOutput)
if (path) {
const pathParts = path.split('.')
let current = parsedContent
for (const part of pathParts) {
if (current && typeof current === 'object' && part in current) {
current = current[part]
} else {
current = undefined
break
}
}
const current = traverseObjectPathInternal(parsedContent, path)
if (current !== undefined) {
extractedValues[path] = current
}
@@ -165,8 +154,8 @@ export function parseOutputContentSafely(output: any): any {
/**
* Check if a set of output IDs contains response format selections for a specific block
*/
export function hasResponseFormatSelection(selectedOutputIds: string[], blockId: string): boolean {
return selectedOutputIds.some((outputId) => {
export function hasResponseFormatSelection(selectedOutputs: string[], blockId: string): boolean {
return selectedOutputs.some((outputId) => {
const blockIdForOutput = extractBlockIdFromOutputId(outputId)
return blockIdForOutput === blockId && outputId.includes('_')
})
@@ -175,11 +164,46 @@ export function hasResponseFormatSelection(selectedOutputIds: string[], blockId:
/**
* Get selected field names for a specific block from output IDs
*/
export function getSelectedFieldNames(selectedOutputIds: string[], blockId: string): string[] {
return selectedOutputIds
export function getSelectedFieldNames(selectedOutputs: string[], blockId: string): string[] {
return selectedOutputs
.filter((outputId) => {
const blockIdForOutput = extractBlockIdFromOutputId(outputId)
return blockIdForOutput === blockId && outputId.includes('_')
})
.map((outputId) => extractPathFromOutputId(outputId, blockId))
}
/**
* Internal helper to traverse an object path without parsing
* @param obj The object to traverse
* @param path The dot-separated path (e.g., "result.data.value")
* @returns The value at the path, or undefined if path doesn't exist
*/
function traverseObjectPathInternal(obj: any, path: string): any {
if (!path) return obj
let current = obj
const parts = path.split('.')
for (const part of parts) {
if (current?.[part] !== undefined) {
current = current[part]
} else {
return undefined
}
}
return current
}
/**
* Traverses an object path safely, returning undefined if any part doesn't exist
* Automatically handles parsing of output content if needed
* @param obj The object to traverse (may contain unparsed content)
* @param path The dot-separated path (e.g., "result.data.value")
* @returns The value at the path, or undefined if path doesn't exist
*/
export function traverseObjectPath(obj: any, path: string): any {
const parsed = parseOutputContentSafely(obj)
return traverseObjectPathInternal(parsed, path)
}
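
Editor's note: a quick usage sketch of the traversal helper above. The sample object is hypothetical; the import path is the one other files in this diff use for these helpers.

```ts
import { traverseObjectPath } from '@/lib/response-format'

// Hypothetical block output used only to illustrate the dot-path lookup.
// parseOutputContentSafely runs first, per the doc comment, so stringified
// output content is handled before traversal.
const output = { result: { data: { value: 42 } }, content: 'done' }

traverseObjectPath(output, 'result.data.value') // => 42
traverseObjectPath(output, 'result.missing')    // => undefined (path does not exist)
```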

View File

@@ -51,7 +51,6 @@ export const buildTimeCSPDirectives: CSPDirectives = {
'https://*.atlassian.com',
'https://cdn.discordapp.com',
'https://*.githubusercontent.com',
'https://*.public.blob.vercel-storage.com',
'https://*.s3.amazonaws.com',
'https://s3.amazonaws.com',
'https://github.com/*',
@@ -152,7 +151,7 @@ export function generateRuntimeCSP(): string {
default-src 'self';
script-src 'self' 'unsafe-inline' 'unsafe-eval' https://*.google.com https://apis.google.com;
style-src 'self' 'unsafe-inline' https://fonts.googleapis.com;
img-src 'self' data: blob: https://*.googleusercontent.com https://*.google.com https://*.atlassian.com https://cdn.discordapp.com https://*.githubusercontent.com https://*.public.blob.vercel-storage.com ${brandLogoDomain} ${brandFaviconDomain};
img-src 'self' data: blob: https://*.googleusercontent.com https://*.google.com https://*.atlassian.com https://cdn.discordapp.com https://*.githubusercontent.com ${brandLogoDomain} ${brandFaviconDomain};
media-src 'self' blob:;
font-src 'self' https://fonts.gstatic.com;
connect-src 'self' ${appUrl} ${ollamaUrl} ${socketUrl} ${socketWsUrl} https://api.browser-use.com https://api.exa.ai https://api.firecrawl.dev https://*.googleapis.com https://*.amazonaws.com https://*.s3.amazonaws.com https://*.blob.core.windows.net https://api.github.com https://github.com/* https://*.atlassian.com https://*.supabase.co ${dynamicDomainsStr};

View File

@@ -41,16 +41,6 @@ export function calculateStreamingCost(
const providerId = getProviderForTokenization(model)
logger.debug('Starting streaming cost calculation', {
model,
providerId,
inputLength: inputText.length,
outputLength: outputText.length,
hasSystemPrompt: !!systemPrompt,
hasContext: !!context,
hasMessages: !!messages?.length,
})
// Estimate input tokens (combine all input sources)
const inputEstimate = estimateInputTokens(systemPrompt, context, messages, providerId)

View File

@@ -5,7 +5,7 @@
import { createLogger } from '@/lib/logs/console/logger'
import { MIN_TEXT_LENGTH_FOR_ESTIMATION, TOKENIZATION_CONFIG } from '@/lib/tokenization/constants'
import type { TokenEstimate } from '@/lib/tokenization/types'
import { createTextPreview, getProviderConfig } from '@/lib/tokenization/utils'
import { getProviderConfig } from '@/lib/tokenization/utils'
const logger = createLogger('TokenizationEstimators')
@@ -25,13 +25,6 @@ export function estimateTokenCount(text: string, providerId?: string): TokenEsti
const effectiveProviderId = providerId || TOKENIZATION_CONFIG.defaults.provider
const config = getProviderConfig(effectiveProviderId)
logger.debug('Starting token estimation', {
provider: effectiveProviderId,
textLength: text.length,
preview: createTextPreview(text),
avgCharsPerToken: config.avgCharsPerToken,
})
let estimatedTokens: number
switch (effectiveProviderId) {
@@ -49,21 +42,12 @@ export function estimateTokenCount(text: string, providerId?: string): TokenEsti
estimatedTokens = estimateGenericTokens(text, config.avgCharsPerToken)
}
const result: TokenEstimate = {
return {
count: Math.max(1, Math.round(estimatedTokens)),
confidence: config.confidence,
provider: effectiveProviderId,
method: 'heuristic',
}
logger.debug('Token estimation completed', {
provider: effectiveProviderId,
textLength: text.length,
estimatedTokens: result.count,
confidence: result.confidence,
})
return result
}
/**

View File

@@ -27,20 +27,11 @@ export function processStreamingBlockLog(log: BlockLog, streamedContent: string)
// Check if we already have meaningful token/cost data
if (hasRealTokenData(log.output?.tokens) && hasRealCostData(log.output?.cost)) {
logger.debug(`Block ${log.blockId} already has real token/cost data`, {
blockType: log.blockType,
tokens: log.output?.tokens,
cost: log.output?.cost,
})
return false
}
// Check if we have content to tokenize
if (!streamedContent?.trim()) {
logger.debug(`Block ${log.blockId} has no content to tokenize`, {
blockType: log.blockType,
contentLength: streamedContent?.length || 0,
})
return false
}
@@ -51,14 +42,6 @@ export function processStreamingBlockLog(log: BlockLog, streamedContent: string)
// Prepare input text from log
const inputText = extractTextContent(log.input)
logger.debug(`Starting tokenization for streaming block ${log.blockId}`, {
blockType: log.blockType,
model,
inputLength: inputText.length,
outputLength: streamedContent.length,
hasInput: !!log.input,
})
// Calculate streaming cost
const result = calculateStreamingCost(
model,
@@ -136,11 +119,6 @@ export function processStreamingBlockLogs(
): number {
let processedCount = 0
logger.debug('Processing streaming block logs for tokenization', {
totalLogs: logs.length,
streamedBlocks: streamedContentMap.size,
})
for (const log of logs) {
const content = streamedContentMap.get(log.blockId)
if (content && processStreamingBlockLog(log, content)) {

View File

@@ -375,6 +375,25 @@ export function isValidName(name: string): boolean {
return /^[a-zA-Z0-9_\s]*$/.test(name)
}
export const SSE_HEADERS = {
'Content-Type': 'text/event-stream',
'Cache-Control': 'no-cache',
Connection: 'keep-alive',
'X-Accel-Buffering': 'no',
} as const
/**
* Encodes data as a Server-Sent Events (SSE) message.
* Formats the data as a JSON string prefixed with "data:" and suffixed with two newlines,
* then encodes it as a Uint8Array for streaming.
*
* @param data - The data to encode and send via SSE
* @returns The encoded SSE message as a Uint8Array
*/
export function encodeSSE(data: any): Uint8Array {
return new TextEncoder().encode(`data: ${JSON.stringify(data)}\n\n`)
}
/**
* Gets a list of invalid characters in a name
*
@@ -386,19 +405,6 @@ export function getInvalidCharacters(name: string): string[] {
return invalidChars ? [...new Set(invalidChars)] : []
}
/**
* Get the full URL for an asset stored in Vercel Blob or local fallback
* - If CDN is configured (NEXT_PUBLIC_BLOB_BASE_URL), uses CDN URL
* - Otherwise falls back to local static assets served from root path
*/
export function getAssetUrl(filename: string) {
const cdnBaseUrl = env.NEXT_PUBLIC_BLOB_BASE_URL
if (cdnBaseUrl) {
return `${cdnBaseUrl}/${filename}`
}
return `/${filename}`
}
/**
* Generate a short request ID for correlation
*/
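
Editor's note: together, `SSE_HEADERS` and `encodeSSE` are enough to assemble a streaming response. The sketch below is a minimal, hypothetical route handler that mirrors the framing used by the streaming route later in this diff; it is not code from this change.

```ts
import { encodeSSE, SSE_HEADERS } from '@/lib/utils'

// Minimal sketch: emit two SSE frames and the '[DONE]' sentinel, then close.
export async function GET(): Promise<Response> {
  const stream = new ReadableStream<Uint8Array>({
    start(controller) {
      // Each enqueue becomes one `data: {...}\n\n` frame.
      controller.enqueue(encodeSSE({ blockId: 'agent-1', chunk: 'Hello' }))
      controller.enqueue(encodeSSE({ event: 'final', data: { success: true } }))
      controller.enqueue(encodeSSE('[DONE]'))
      controller.close()
    },
  })
  return new Response(stream, { headers: SSE_HEADERS })
}
```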

View File

@@ -2,6 +2,7 @@ import { db, webhook, workflow } from '@sim/db'
import { tasks } from '@trigger.dev/sdk'
import { and, eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { getApiKeyOwnerUserId } from '@/lib/api-key/service'
import { checkServerSideUsageLimits } from '@/lib/billing'
import { getHighestPrioritySubscription } from '@/lib/billing/core/subscription'
import { env, isTruthy } from '@/lib/env'
@@ -268,18 +269,25 @@ export async function checkRateLimits(
requestId: string
): Promise<NextResponse | null> {
try {
const userSubscription = await getHighestPrioritySubscription(foundWorkflow.userId)
const actorUserId = await getApiKeyOwnerUserId(foundWorkflow.pinnedApiKeyId)
if (!actorUserId) {
logger.warn(`[${requestId}] Webhook requires pinned API key to attribute usage`)
return NextResponse.json({ message: 'Pinned API key required' }, { status: 200 })
}
const userSubscription = await getHighestPrioritySubscription(actorUserId)
const rateLimiter = new RateLimiter()
const rateLimitCheck = await rateLimiter.checkRateLimitWithSubscription(
foundWorkflow.userId,
actorUserId,
userSubscription,
'webhook',
true
)
if (!rateLimitCheck.allowed) {
logger.warn(`[${requestId}] Rate limit exceeded for webhook user ${foundWorkflow.userId}`, {
logger.warn(`[${requestId}] Rate limit exceeded for webhook user ${actorUserId}`, {
provider: foundWebhook.provider,
remaining: rateLimitCheck.remaining,
resetAt: rateLimitCheck.resetAt,
@@ -319,10 +327,17 @@ export async function checkUsageLimits(
}
try {
const usageCheck = await checkServerSideUsageLimits(foundWorkflow.userId)
const actorUserId = await getApiKeyOwnerUserId(foundWorkflow.pinnedApiKeyId)
if (!actorUserId) {
logger.warn(`[${requestId}] Webhook requires pinned API key to attribute usage`)
return NextResponse.json({ message: 'Pinned API key required' }, { status: 200 })
}
const usageCheck = await checkServerSideUsageLimits(actorUserId)
if (usageCheck.isExceeded) {
logger.warn(
`[${requestId}] User ${foundWorkflow.userId} has exceeded usage limits. Skipping webhook execution.`,
`[${requestId}] User ${actorUserId} has exceeded usage limits. Skipping webhook execution.`,
{
currentUsage: usageCheck.currentUsage,
limit: usageCheck.limit,
@@ -361,10 +376,16 @@ export async function queueWebhookExecution(
options: WebhookProcessorOptions
): Promise<NextResponse> {
try {
const actorUserId = await getApiKeyOwnerUserId(foundWorkflow.pinnedApiKeyId)
if (!actorUserId) {
logger.warn(`[${options.requestId}] Webhook requires pinned API key to attribute usage`)
return NextResponse.json({ message: 'Pinned API key required' }, { status: 200 })
}
const payload = {
webhookId: foundWebhook.id,
workflowId: foundWorkflow.id,
userId: foundWorkflow.userId,
userId: actorUserId,
provider: foundWebhook.provider,
body,
headers: Object.fromEntries(request.headers.entries()),

View File

@@ -0,0 +1,190 @@
import { createLogger } from '@/lib/logs/console/logger'
import { encodeSSE } from '@/lib/utils'
import type { ExecutionResult } from '@/executor/types'
const logger = createLogger('WorkflowStreaming')
export interface StreamingConfig {
selectedOutputs?: string[]
isSecureMode?: boolean
workflowTriggerType?: 'api' | 'chat'
onStream?: (streamingExec: {
stream: ReadableStream
execution?: { blockId?: string }
}) => Promise<void>
}
export interface StreamingResponseOptions {
requestId: string
workflow: { id: string; userId: string; isDeployed?: boolean }
input: any
executingUserId: string
streamConfig: StreamingConfig
createFilteredResult: (result: ExecutionResult) => any
}
export async function createStreamingResponse(
options: StreamingResponseOptions
): Promise<ReadableStream> {
const { requestId, workflow, input, executingUserId, streamConfig, createFilteredResult } =
options
const { executeWorkflow, createFilteredResult: defaultFilteredResult } = await import(
'@/app/api/workflows/[id]/execute/route'
)
const filterResultFn = createFilteredResult || defaultFilteredResult
return new ReadableStream({
async start(controller) {
try {
const streamedContent = new Map<string, string>()
const processedOutputs = new Set<string>()
const sendChunk = (blockId: string, content: string) => {
const separator = processedOutputs.size > 0 ? '\n\n' : ''
controller.enqueue(encodeSSE({ blockId, chunk: separator + content }))
processedOutputs.add(blockId)
}
const onStreamCallback = async (streamingExec: {
stream: ReadableStream
execution?: { blockId?: string }
}) => {
const blockId = streamingExec.execution?.blockId || 'unknown'
const reader = streamingExec.stream.getReader()
const decoder = new TextDecoder()
let isFirstChunk = true
try {
while (true) {
const { done, value } = await reader.read()
if (done) break
const textChunk = decoder.decode(value, { stream: true })
streamedContent.set(blockId, (streamedContent.get(blockId) || '') + textChunk)
if (isFirstChunk) {
sendChunk(blockId, textChunk)
isFirstChunk = false
} else {
controller.enqueue(encodeSSE({ blockId, chunk: textChunk }))
}
}
} catch (streamError) {
logger.error(`[${requestId}] Error reading agent stream:`, streamError)
controller.enqueue(
encodeSSE({
event: 'stream_error',
blockId,
error: streamError instanceof Error ? streamError.message : 'Stream reading error',
})
)
}
}
const onBlockCompleteCallback = async (blockId: string, output: any) => {
if (!streamConfig.selectedOutputs?.length) return
const { extractBlockIdFromOutputId, extractPathFromOutputId, traverseObjectPath } =
await import('@/lib/response-format')
const matchingOutputs = streamConfig.selectedOutputs.filter(
(outputId) => extractBlockIdFromOutputId(outputId) === blockId
)
if (!matchingOutputs.length) return
for (const outputId of matchingOutputs) {
const path = extractPathFromOutputId(outputId, blockId)
// Response blocks have their data nested under 'response'
let outputValue = traverseObjectPath(output, path)
if (outputValue === undefined && output.response) {
outputValue = traverseObjectPath(output.response, path)
}
if (outputValue !== undefined) {
const formattedOutput =
typeof outputValue === 'string' ? outputValue : JSON.stringify(outputValue, null, 2)
sendChunk(blockId, formattedOutput)
}
}
}
const result = await executeWorkflow(workflow, requestId, input, executingUserId, {
enabled: true,
selectedOutputs: streamConfig.selectedOutputs,
isSecureMode: streamConfig.isSecureMode,
workflowTriggerType: streamConfig.workflowTriggerType,
onStream: onStreamCallback,
onBlockComplete: onBlockCompleteCallback,
})
if (result.logs && streamedContent.size > 0) {
result.logs = result.logs.map((log: any) => {
if (streamedContent.has(log.blockId)) {
const content = streamedContent.get(log.blockId)
if (log.output && content) {
return { ...log, output: { ...log.output, content } }
}
}
return log
})
const { processStreamingBlockLogs } = await import('@/lib/tokenization')
processStreamingBlockLogs(result.logs, streamedContent)
}
// Create a minimal result with only selected outputs
const minimalResult = {
success: result.success,
error: result.error,
output: {} as any,
}
// If there are selected outputs, only include those specific fields
if (streamConfig.selectedOutputs?.length && result.output) {
const { extractBlockIdFromOutputId, extractPathFromOutputId, traverseObjectPath } =
await import('@/lib/response-format')
for (const outputId of streamConfig.selectedOutputs) {
const blockId = extractBlockIdFromOutputId(outputId)
const path = extractPathFromOutputId(outputId, blockId)
// Find the output value from the result
if (result.logs) {
const blockLog = result.logs.find((log: any) => log.blockId === blockId)
if (blockLog?.output) {
// Response blocks have their data nested under 'response'
let value = traverseObjectPath(blockLog.output, path)
if (value === undefined && blockLog.output.response) {
value = traverseObjectPath(blockLog.output.response, path)
}
if (value !== undefined) {
// Store it in a structured way
if (!minimalResult.output[blockId]) {
minimalResult.output[blockId] = {}
}
minimalResult.output[blockId][path] = value
}
}
}
}
} else if (!streamConfig.selectedOutputs?.length) {
// No selected outputs means include the full output (but still filtered)
minimalResult.output = result.output
}
controller.enqueue(encodeSSE({ event: 'final', data: minimalResult }))
controller.enqueue(encodeSSE('[DONE]'))
controller.close()
} catch (error: any) {
logger.error(`[${requestId}] Stream error:`, error)
controller.enqueue(
encodeSSE({ event: 'error', error: error.message || 'Stream processing error' })
)
controller.close()
}
},
})
}
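
Editor's note: for completeness, a hedged client-side sketch of consuming this stream. The frame shapes ({ blockId, chunk }, { event: 'final', data }, and the '[DONE]' sentinel) come from the module above; the endpoint path, `X-API-Key` header, and request body fields are assumptions for illustration.

```ts
// Hypothetical consumer of the SSE stream produced by createStreamingResponse.
async function consumeWorkflowStream(workflowId: string, apiKey: string): Promise<void> {
  const res = await fetch(`/api/workflows/${workflowId}/execute`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json', 'X-API-Key': apiKey }, // assumed auth header
    body: JSON.stringify({ stream: true, selectedOutputs: ['agent-1_content'] }), // assumed body shape
  })
  const reader = res.body!.getReader()
  const decoder = new TextDecoder()
  let buffer = ''
  let streamedText = ''
  while (true) {
    const { done, value } = await reader.read()
    if (done) break
    buffer += decoder.decode(value, { stream: true })
    const frames = buffer.split('\n\n')
    buffer = frames.pop() ?? '' // keep any partial frame for the next read
    for (const frame of frames) {
      if (!frame.startsWith('data: ')) continue
      const payload = frame.slice('data: '.length)
      if (payload === '"[DONE]"') {
        // encodeSSE JSON-stringifies the sentinel, hence the quoted comparison.
        console.log('streamed text:', streamedText)
        return
      }
      const event = JSON.parse(payload)
      if (typeof event.chunk === 'string') streamedText += event.chunk
      else if (event.event === 'final') console.log('final result:', event.data)
    }
  }
}
```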

View File

@@ -1,5 +1,5 @@
import type { NextConfig } from 'next'
import { env, isTruthy } from './lib/env'
import { env, getEnv, isTruthy } from './lib/env'
import { isDev, isHosted } from './lib/environment'
import { getMainCSPPolicy, getWorkflowExecutionCSPPolicy } from './lib/security/csp'
@@ -20,7 +20,7 @@ const nextConfig: NextConfig = {
protocol: 'https',
hostname: '*.blob.core.windows.net',
},
// AWS S3 - various regions and bucket configurations
// AWS S3
{
protocol: 'https',
hostname: '*.s3.amazonaws.com',
@@ -33,23 +33,14 @@ const nextConfig: NextConfig = {
protocol: 'https',
hostname: 'lh3.googleusercontent.com',
},
// Custom domain for file storage if configured
...(env.NEXT_PUBLIC_BLOB_BASE_URL
? [
{
protocol: 'https' as const,
hostname: new URL(env.NEXT_PUBLIC_BLOB_BASE_URL).hostname,
},
]
: []),
// Brand logo domain if configured
...(env.NEXT_PUBLIC_BRAND_LOGO_URL
...(getEnv('NEXT_PUBLIC_BRAND_LOGO_URL')
? (() => {
try {
return [
{
protocol: 'https' as const,
hostname: new URL(env.NEXT_PUBLIC_BRAND_LOGO_URL).hostname,
hostname: new URL(getEnv('NEXT_PUBLIC_BRAND_LOGO_URL')!).hostname,
},
]
} catch {
@@ -58,13 +49,13 @@ const nextConfig: NextConfig = {
})()
: []),
// Brand favicon domain if configured
...(env.NEXT_PUBLIC_BRAND_FAVICON_URL
...(getEnv('NEXT_PUBLIC_BRAND_FAVICON_URL')
? (() => {
try {
return [
{
protocol: 'https' as const,
hostname: new URL(env.NEXT_PUBLIC_BRAND_FAVICON_URL).hostname,
hostname: new URL(getEnv('NEXT_PUBLIC_BRAND_FAVICON_URL')!).hostname,
},
]
} catch {

View File

@@ -214,6 +214,9 @@ export class Serializer {
if (block.triggerMode === true || isTriggerCategory) {
params.triggerMode = true
}
if (block.advancedMode === true) {
params.advancedMode = true
}
} catch (_) {
// no-op: conservative, avoid blocking serialization if blockConfig is unexpected
}
@@ -672,10 +675,10 @@ export class Serializer {
subBlocks,
outputs: serializedBlock.outputs,
enabled: true,
// Restore trigger mode from serialized params; treat trigger category as triggers as well
triggerMode:
serializedBlock.config?.params?.triggerMode === true ||
serializedBlock.metadata?.category === 'triggers',
advancedMode: serializedBlock.config?.params?.advancedMode === true,
}
}
}

Some files were not shown because too many files have changed in this diff.