mirror of
https://github.com/simstudioai/sim.git
synced 2026-01-09 23:17:59 -05:00
Compare commits
25 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
c0f5ba75f1 | ||
|
|
5a943bca32 | ||
|
|
923595f57e | ||
|
|
241d9fd12d | ||
|
|
97a8778449 | ||
|
|
833e700b58 | ||
|
|
2d49892aaa | ||
|
|
8ce5a1b7c0 | ||
|
|
88d2e7b97b | ||
|
|
c04eb01aed | ||
|
|
5d887fdca7 | ||
|
|
1a0fdb32fe | ||
|
|
9d45b8df1e | ||
|
|
ae3a7f0865 | ||
|
|
25f5e31378 | ||
|
|
7bdf0e94d7 | ||
|
|
8e43774b5e | ||
|
|
715f42c1a6 | ||
|
|
8200e9a88f | ||
|
|
c6f6c9e2a5 | ||
|
|
2d7ba91c0e | ||
|
|
872e034312 | ||
|
|
a63a7b0262 | ||
|
|
991a020917 | ||
|
|
f03f395225 |
2
.github/workflows/docs-embeddings.yml
vendored
2
.github/workflows/docs-embeddings.yml
vendored
@@ -17,7 +17,7 @@ jobs:
|
||||
- name: Setup Bun
|
||||
uses: oven-sh/setup-bun@v2
|
||||
with:
|
||||
bun-version: latest
|
||||
bun-version: 1.2.22
|
||||
|
||||
- name: Setup Node
|
||||
uses: actions/setup-node@v4
|
||||
|
||||
4
.github/workflows/i18n.yml
vendored
4
.github/workflows/i18n.yml
vendored
@@ -27,7 +27,7 @@ jobs:
|
||||
- name: Setup Bun
|
||||
uses: oven-sh/setup-bun@v2
|
||||
with:
|
||||
bun-version: latest
|
||||
bun-version: 1.2.22
|
||||
|
||||
- name: Run Lingo.dev translations
|
||||
env:
|
||||
@@ -116,7 +116,7 @@ jobs:
|
||||
- name: Setup Bun
|
||||
uses: oven-sh/setup-bun@v2
|
||||
with:
|
||||
bun-version: latest
|
||||
bun-version: 1.2.22
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
|
||||
2
.github/workflows/migrations.yml
vendored
2
.github/workflows/migrations.yml
vendored
@@ -16,7 +16,7 @@ jobs:
|
||||
- name: Setup Bun
|
||||
uses: oven-sh/setup-bun@v2
|
||||
with:
|
||||
bun-version: latest
|
||||
bun-version: 1.2.22
|
||||
|
||||
- name: Install dependencies
|
||||
run: bun install
|
||||
|
||||
2
.github/workflows/publish-cli.yml
vendored
2
.github/workflows/publish-cli.yml
vendored
@@ -16,7 +16,7 @@ jobs:
|
||||
- name: Setup Bun
|
||||
uses: oven-sh/setup-bun@v2
|
||||
with:
|
||||
bun-version: latest
|
||||
bun-version: 1.2.22
|
||||
|
||||
- name: Setup Node.js for npm publishing
|
||||
uses: actions/setup-node@v4
|
||||
|
||||
2
.github/workflows/publish-python-sdk.yml
vendored
2
.github/workflows/publish-python-sdk.yml
vendored
@@ -84,6 +84,6 @@ jobs:
|
||||
```
|
||||
|
||||
### Documentation
|
||||
See the [README](https://github.com/simstudio/sim/tree/main/packages/python-sdk) for usage instructions.
|
||||
See the [README](https://github.com/simstudioai/sim/tree/main/packages/python-sdk) or the [docs](https://docs.sim.ai/sdks/python) for more information.
|
||||
draft: false
|
||||
prerelease: false
|
||||
5
.github/workflows/publish-ts-sdk.yml
vendored
5
.github/workflows/publish-ts-sdk.yml
vendored
@@ -16,7 +16,7 @@ jobs:
|
||||
- name: Setup Bun
|
||||
uses: oven-sh/setup-bun@v2
|
||||
with:
|
||||
bun-version: latest
|
||||
bun-version: 1.2.22
|
||||
|
||||
- name: Setup Node.js for npm publishing
|
||||
uses: actions/setup-node@v4
|
||||
@@ -25,7 +25,6 @@ jobs:
|
||||
registry-url: 'https://registry.npmjs.org/'
|
||||
|
||||
- name: Install dependencies
|
||||
working-directory: packages/ts-sdk
|
||||
run: bun install
|
||||
|
||||
- name: Run tests
|
||||
@@ -80,6 +79,6 @@ jobs:
|
||||
```
|
||||
|
||||
### Documentation
|
||||
See the [README](https://github.com/simstudio/sim/tree/main/packages/ts-sdk) for usage instructions.
|
||||
See the [README](https://github.com/simstudioai/sim/tree/main/packages/ts-sdk) or the [docs](https://docs.sim.ai/sdks/typescript) for more information.
|
||||
draft: false
|
||||
prerelease: false
|
||||
2
.github/workflows/test-build.yml
vendored
2
.github/workflows/test-build.yml
vendored
@@ -16,7 +16,7 @@ jobs:
|
||||
- name: Setup Bun
|
||||
uses: oven-sh/setup-bun@v2
|
||||
with:
|
||||
bun-version: latest
|
||||
bun-version: 1.2.22
|
||||
|
||||
- name: Setup Node
|
||||
uses: actions/setup-node@v4
|
||||
|
||||
2
.github/workflows/trigger-deploy.yml
vendored
2
.github/workflows/trigger-deploy.yml
vendored
@@ -27,7 +27,7 @@ jobs:
|
||||
- name: Setup Bun
|
||||
uses: oven-sh/setup-bun@v2
|
||||
with:
|
||||
bun-version: latest
|
||||
bun-version: 1.2.22
|
||||
|
||||
- name: Install dependencies
|
||||
run: bun install
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
'use client'
|
||||
|
||||
import { useEffect, useRef } from 'react'
|
||||
import { getVideoUrl } from '@/lib/utils'
|
||||
import { getAssetUrl } from '@/lib/utils'
|
||||
|
||||
interface LightboxProps {
|
||||
isOpen: boolean
|
||||
@@ -60,7 +60,7 @@ export function Lightbox({ isOpen, onClose, src, alt, type }: LightboxProps) {
|
||||
/>
|
||||
) : (
|
||||
<video
|
||||
src={getVideoUrl(src)}
|
||||
src={getAssetUrl(src)}
|
||||
autoPlay
|
||||
loop
|
||||
muted
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
'use client'
|
||||
|
||||
import { useState } from 'react'
|
||||
import { getVideoUrl } from '@/lib/utils'
|
||||
import { getAssetUrl } from '@/lib/utils'
|
||||
import { Lightbox } from './lightbox'
|
||||
|
||||
interface VideoProps {
|
||||
@@ -39,7 +39,7 @@ export function Video({
|
||||
muted={muted}
|
||||
playsInline={playsInline}
|
||||
className={`${className} ${enableLightbox ? 'cursor-pointer transition-opacity hover:opacity-90' : ''}`}
|
||||
src={getVideoUrl(src)}
|
||||
src={getAssetUrl(src)}
|
||||
onClick={handleVideoClick}
|
||||
/>
|
||||
|
||||
|
||||
@@ -10,7 +10,7 @@ import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
|
||||
Das offizielle Python SDK für Sim ermöglicht es Ihnen, Workflows programmatisch aus Ihren Python-Anwendungen mithilfe des offiziellen Python SDKs auszuführen.
|
||||
|
||||
<Callout type="info">
|
||||
Das Python SDK unterstützt Python 3.8+ und bietet synchrone Workflow-Ausführung. Alle Workflow-Ausführungen sind derzeit synchron.
|
||||
Das Python SDK unterstützt Python 3.8+ mit asynchroner Ausführungsunterstützung, automatischer Ratenbegrenzung mit exponentiellem Backoff und Nutzungsverfolgung.
|
||||
</Callout>
|
||||
|
||||
## Installation
|
||||
@@ -74,12 +74,17 @@ result = client.execute_workflow(
|
||||
- `workflow_id` (str): Die ID des auszuführenden Workflows
|
||||
- `input_data` (dict, optional): Eingabedaten, die an den Workflow übergeben werden
|
||||
- `timeout` (float, optional): Timeout in Sekunden (Standard: 30.0)
|
||||
- `stream` (bool, optional): Streaming-Antworten aktivieren (Standard: False)
|
||||
- `selected_outputs` (list[str], optional): Block-Ausgaben, die im `blockName.attribute`Format gestreamt werden sollen (z.B. `["agent1.content"]`)
|
||||
- `async_execution` (bool, optional): Asynchron ausführen (Standard: False)
|
||||
|
||||
**Rückgabewert:** `WorkflowExecutionResult`
|
||||
**Rückgabe:** `WorkflowExecutionResult | AsyncExecutionResult`
|
||||
|
||||
Wenn `async_execution=True`, wird sofort mit einer Task-ID zum Abfragen zurückgegeben. Andernfalls wird auf den Abschluss gewartet.
|
||||
|
||||
##### get_workflow_status()
|
||||
|
||||
Ruft den Status eines Workflows ab (Deployment-Status usw.).
|
||||
Den Status eines Workflows abrufen (Bereitstellungsstatus usw.).
|
||||
|
||||
```python
|
||||
status = client.get_workflow_status("workflow-id")
|
||||
@@ -93,7 +98,7 @@ print("Is deployed:", status.is_deployed)
|
||||
|
||||
##### validate_workflow()
|
||||
|
||||
Überprüft, ob ein Workflow für die Ausführung bereit ist.
|
||||
Überprüfen, ob ein Workflow für die Ausführung bereit ist.
|
||||
|
||||
```python
|
||||
is_ready = client.validate_workflow("workflow-id")
|
||||
@@ -107,28 +112,118 @@ if is_ready:
|
||||
|
||||
**Rückgabe:** `bool`
|
||||
|
||||
##### execute_workflow_sync()
|
||||
##### get_job_status()
|
||||
|
||||
<Callout type="info">
|
||||
Derzeit ist diese Methode identisch mit `execute_workflow()`, da alle Ausführungen synchron sind. Diese Methode wird für zukünftige Kompatibilität bereitgestellt, wenn asynchrone Ausführung hinzugefügt wird.
|
||||
</Callout>
|
||||
|
||||
Führt einen Workflow aus (derzeit synchron, identisch mit `execute_workflow()`).
|
||||
Den Status einer asynchronen Job-Ausführung abrufen.
|
||||
|
||||
```python
|
||||
result = client.execute_workflow_sync(
|
||||
status = client.get_job_status("task-id-from-async-execution")
|
||||
print("Status:", status["status"]) # 'queued', 'processing', 'completed', 'failed'
|
||||
if status["status"] == "completed":
|
||||
print("Output:", status["output"])
|
||||
```
|
||||
|
||||
**Parameter:**
|
||||
- `task_id` (str): Die Task-ID, die von der asynchronen Ausführung zurückgegeben wurde
|
||||
|
||||
**Rückgabe:** `Dict[str, Any]`
|
||||
|
||||
**Antwortfelder:**
|
||||
- `success` (bool): Ob die Anfrage erfolgreich war
|
||||
- `taskId` (str): Die Task-ID
|
||||
- `status` (str): Einer der Werte `'queued'`, `'processing'`, `'completed'`, `'failed'`, `'cancelled'`
|
||||
- `metadata` (dict): Enthält `startedAt`, `completedAt` und `duration`
|
||||
- `output` (any, optional): Die Workflow-Ausgabe (wenn abgeschlossen)
|
||||
- `error` (any, optional): Fehlerdetails (wenn fehlgeschlagen)
|
||||
- `estimatedDuration` (int, optional): Geschätzte Dauer in Millisekunden (wenn in Bearbeitung/in Warteschlange)
|
||||
|
||||
##### execute_with_retry()
|
||||
|
||||
Einen Workflow mit automatischer Wiederholung bei Ratenbegrenzungsfehlern unter Verwendung von exponentiellem Backoff ausführen.
|
||||
|
||||
```python
|
||||
result = client.execute_with_retry(
|
||||
"workflow-id",
|
||||
input_data={"data": "some input"},
|
||||
timeout=60.0
|
||||
input_data={"message": "Hello"},
|
||||
timeout=30.0,
|
||||
max_retries=3, # Maximum number of retries
|
||||
initial_delay=1.0, # Initial delay in seconds
|
||||
max_delay=30.0, # Maximum delay in seconds
|
||||
backoff_multiplier=2.0 # Exponential backoff multiplier
|
||||
)
|
||||
```
|
||||
|
||||
**Parameter:**
|
||||
- `workflow_id` (str): Die ID des auszuführenden Workflows
|
||||
- `input_data` (dict, optional): Eingabedaten, die an den Workflow übergeben werden
|
||||
- `timeout` (float): Timeout für die initiale Anfrage in Sekunden
|
||||
- `timeout` (float, optional): Timeout in Sekunden
|
||||
- `stream` (bool, optional): Streaming-Antworten aktivieren
|
||||
- `selected_outputs` (list, optional): Block-Ausgaben zum Streamen
|
||||
- `async_execution` (bool, optional): Asynchron ausführen
|
||||
- `max_retries` (int, optional): Maximale Anzahl von Wiederholungen (Standard: 3)
|
||||
- `initial_delay` (float, optional): Anfängliche Verzögerung in Sekunden (Standard: 1.0)
|
||||
- `max_delay` (float, optional): Maximale Verzögerung in Sekunden (Standard: 30.0)
|
||||
- `backoff_multiplier` (float, optional): Backoff-Multiplikator (Standard: 2.0)
|
||||
|
||||
**Rückgabe:** `WorkflowExecutionResult`
|
||||
**Rückgabewert:** `WorkflowExecutionResult | AsyncExecutionResult`
|
||||
|
||||
Die Wiederholungslogik verwendet exponentielles Backoff (1s → 2s → 4s → 8s...) mit ±25% Jitter, um den Thundering-Herd-Effekt zu vermeiden. Wenn die API einen `retry-after` Header bereitstellt, wird dieser stattdessen verwendet.
|
||||
|
||||
##### get_rate_limit_info()
|
||||
|
||||
Ruft die aktuellen Rate-Limit-Informationen aus der letzten API-Antwort ab.
|
||||
|
||||
```python
|
||||
rate_limit_info = client.get_rate_limit_info()
|
||||
if rate_limit_info:
|
||||
print("Limit:", rate_limit_info.limit)
|
||||
print("Remaining:", rate_limit_info.remaining)
|
||||
print("Reset:", datetime.fromtimestamp(rate_limit_info.reset))
|
||||
```
|
||||
|
||||
**Rückgabewert:** `RateLimitInfo | None`
|
||||
|
||||
##### get_usage_limits()
|
||||
|
||||
Ruft aktuelle Nutzungslimits und Kontingentinformationen für dein Konto ab.
|
||||
|
||||
```python
|
||||
limits = client.get_usage_limits()
|
||||
print("Sync requests remaining:", limits.rate_limit["sync"]["remaining"])
|
||||
print("Async requests remaining:", limits.rate_limit["async"]["remaining"])
|
||||
print("Current period cost:", limits.usage["currentPeriodCost"])
|
||||
print("Plan:", limits.usage["plan"])
|
||||
```
|
||||
|
||||
**Rückgabewert:** `UsageLimits`
|
||||
|
||||
**Antwortstruktur:**
|
||||
|
||||
```python
|
||||
{
|
||||
"success": bool,
|
||||
"rateLimit": {
|
||||
"sync": {
|
||||
"isLimited": bool,
|
||||
"limit": int,
|
||||
"remaining": int,
|
||||
"resetAt": str
|
||||
},
|
||||
"async": {
|
||||
"isLimited": bool,
|
||||
"limit": int,
|
||||
"remaining": int,
|
||||
"resetAt": str
|
||||
},
|
||||
"authType": str # 'api' or 'manual'
|
||||
},
|
||||
"usage": {
|
||||
"currentPeriodCost": float,
|
||||
"limit": float,
|
||||
"plan": str # e.g., 'free', 'pro'
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
##### set_api_key()
|
||||
|
||||
@@ -170,6 +265,18 @@ class WorkflowExecutionResult:
|
||||
total_duration: Optional[float] = None
|
||||
```
|
||||
|
||||
### AsyncExecutionResult
|
||||
|
||||
```python
|
||||
@dataclass
|
||||
class AsyncExecutionResult:
|
||||
success: bool
|
||||
task_id: str
|
||||
status: str # 'queued'
|
||||
created_at: str
|
||||
links: Dict[str, str] # e.g., {"status": "/api/jobs/{taskId}"}
|
||||
```
|
||||
|
||||
### WorkflowStatus
|
||||
|
||||
```python
|
||||
@@ -181,6 +288,27 @@ class WorkflowStatus:
|
||||
needs_redeployment: bool = False
|
||||
```
|
||||
|
||||
### RateLimitInfo
|
||||
|
||||
```python
|
||||
@dataclass
|
||||
class RateLimitInfo:
|
||||
limit: int
|
||||
remaining: int
|
||||
reset: int
|
||||
retry_after: Optional[int] = None
|
||||
```
|
||||
|
||||
### UsageLimits
|
||||
|
||||
```python
|
||||
@dataclass
|
||||
class UsageLimits:
|
||||
success: bool
|
||||
rate_limit: Dict[str, Any]
|
||||
usage: Dict[str, Any]
|
||||
```
|
||||
|
||||
### SimStudioError
|
||||
|
||||
```python
|
||||
@@ -191,6 +319,13 @@ class SimStudioError(Exception):
|
||||
self.status = status
|
||||
```
|
||||
|
||||
**Häufige Fehlercodes:**
|
||||
- `UNAUTHORIZED`: Ungültiger API-Schlüssel
|
||||
- `TIMEOUT`: Zeitüberschreitung bei der Anfrage
|
||||
- `RATE_LIMIT_EXCEEDED`: Ratengrenze überschritten
|
||||
- `USAGE_LIMIT_EXCEEDED`: Nutzungsgrenze überschritten
|
||||
- `EXECUTION_ERROR`: Workflow-Ausführung fehlgeschlagen
|
||||
|
||||
## Beispiele
|
||||
|
||||
### Grundlegende Workflow-Ausführung
|
||||
@@ -214,7 +349,7 @@ class SimStudioError(Exception):
|
||||
import os
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def run_workflow():
|
||||
try:
|
||||
@@ -252,7 +387,7 @@ Behandeln Sie verschiedene Fehlertypen, die während der Workflow-Ausführung au
|
||||
from simstudio import SimStudioClient, SimStudioError
|
||||
import os
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_with_error_handling():
|
||||
try:
|
||||
@@ -279,16 +414,7 @@ def execute_with_error_handling():
|
||||
|
||||
Verwenden Sie den Client als Kontextmanager, um die Ressourcenbereinigung automatisch zu handhaben:
|
||||
|
||||
```python
|
||||
from simstudio import SimStudioClient
|
||||
import os
|
||||
|
||||
# Using context manager to automatically close the session
|
||||
with SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY")) as client:
|
||||
result = client.execute_workflow("workflow-id")
|
||||
print("Result:", result)
|
||||
# Session is automatically closed here
|
||||
```
|
||||
---CODE-PLACEHOLDER-ef99d3dd509e04865d5b6b0e0e03d3f8---
|
||||
|
||||
### Batch-Workflow-Ausführung
|
||||
|
||||
@@ -298,7 +424,7 @@ Führen Sie mehrere Workflows effizient aus:
|
||||
from simstudio import SimStudioClient
|
||||
import os
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_workflows_batch(workflow_data_pairs):
|
||||
"""Execute multiple workflows with different input data."""
|
||||
@@ -339,9 +465,233 @@ for result in results:
|
||||
print(f"Workflow {result['workflow_id']}: {'Success' if result['success'] else 'Failed'}")
|
||||
```
|
||||
|
||||
### Asynchrone Workflow-Ausführung
|
||||
|
||||
Führen Sie Workflows asynchron für lang laufende Aufgaben aus:
|
||||
|
||||
```python
|
||||
import os
|
||||
import time
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_async():
|
||||
try:
|
||||
# Start async execution
|
||||
result = client.execute_workflow(
|
||||
"workflow-id",
|
||||
input_data={"data": "large dataset"},
|
||||
async_execution=True # Execute asynchronously
|
||||
)
|
||||
|
||||
# Check if result is an async execution
|
||||
if hasattr(result, 'task_id'):
|
||||
print(f"Task ID: {result.task_id}")
|
||||
print(f"Status endpoint: {result.links['status']}")
|
||||
|
||||
# Poll for completion
|
||||
status = client.get_job_status(result.task_id)
|
||||
|
||||
while status["status"] in ["queued", "processing"]:
|
||||
print(f"Current status: {status['status']}")
|
||||
time.sleep(2) # Wait 2 seconds
|
||||
status = client.get_job_status(result.task_id)
|
||||
|
||||
if status["status"] == "completed":
|
||||
print("Workflow completed!")
|
||||
print(f"Output: {status['output']}")
|
||||
print(f"Duration: {status['metadata']['duration']}")
|
||||
else:
|
||||
print(f"Workflow failed: {status['error']}")
|
||||
|
||||
except Exception as error:
|
||||
print(f"Error: {error}")
|
||||
|
||||
execute_async()
|
||||
```
|
||||
|
||||
### Rate-Limiting und Wiederholungsversuche
|
||||
|
||||
Behandle Rate-Limits automatisch mit exponentiellem Backoff:
|
||||
|
||||
```python
|
||||
import os
|
||||
from simstudio import SimStudioClient, SimStudioError
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_with_retry_handling():
|
||||
try:
|
||||
# Automatically retries on rate limit
|
||||
result = client.execute_with_retry(
|
||||
"workflow-id",
|
||||
input_data={"message": "Process this"},
|
||||
max_retries=5,
|
||||
initial_delay=1.0,
|
||||
max_delay=60.0,
|
||||
backoff_multiplier=2.0
|
||||
)
|
||||
|
||||
print(f"Success: {result}")
|
||||
except SimStudioError as error:
|
||||
if error.code == "RATE_LIMIT_EXCEEDED":
|
||||
print("Rate limit exceeded after all retries")
|
||||
|
||||
# Check rate limit info
|
||||
rate_limit_info = client.get_rate_limit_info()
|
||||
if rate_limit_info:
|
||||
from datetime import datetime
|
||||
reset_time = datetime.fromtimestamp(rate_limit_info.reset)
|
||||
print(f"Rate limit resets at: {reset_time}")
|
||||
|
||||
execute_with_retry_handling()
|
||||
```
|
||||
|
||||
### Nutzungsüberwachung
|
||||
|
||||
Überwache deine Kontonutzung und -limits:
|
||||
|
||||
```python
|
||||
import os
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def check_usage():
|
||||
try:
|
||||
limits = client.get_usage_limits()
|
||||
|
||||
print("=== Rate Limits ===")
|
||||
print("Sync requests:")
|
||||
print(f" Limit: {limits.rate_limit['sync']['limit']}")
|
||||
print(f" Remaining: {limits.rate_limit['sync']['remaining']}")
|
||||
print(f" Resets at: {limits.rate_limit['sync']['resetAt']}")
|
||||
print(f" Is limited: {limits.rate_limit['sync']['isLimited']}")
|
||||
|
||||
print("\nAsync requests:")
|
||||
print(f" Limit: {limits.rate_limit['async']['limit']}")
|
||||
print(f" Remaining: {limits.rate_limit['async']['remaining']}")
|
||||
print(f" Resets at: {limits.rate_limit['async']['resetAt']}")
|
||||
print(f" Is limited: {limits.rate_limit['async']['isLimited']}")
|
||||
|
||||
print("\n=== Usage ===")
|
||||
print(f"Current period cost: ${limits.usage['currentPeriodCost']:.2f}")
|
||||
print(f"Limit: ${limits.usage['limit']:.2f}")
|
||||
print(f"Plan: {limits.usage['plan']}")
|
||||
|
||||
percent_used = (limits.usage['currentPeriodCost'] / limits.usage['limit']) * 100
|
||||
print(f"Usage: {percent_used:.1f}%")
|
||||
|
||||
if percent_used > 80:
|
||||
print("⚠️ Warning: You are approaching your usage limit!")
|
||||
|
||||
except Exception as error:
|
||||
print(f"Error checking usage: {error}")
|
||||
|
||||
check_usage()
|
||||
```
|
||||
|
||||
### Streaming-Workflow-Ausführung
|
||||
|
||||
Führe Workflows mit Echtzeit-Streaming-Antworten aus:
|
||||
|
||||
```python
|
||||
from simstudio import SimStudioClient
|
||||
import os
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_with_streaming():
|
||||
"""Execute workflow with streaming enabled."""
|
||||
try:
|
||||
# Enable streaming for specific block outputs
|
||||
result = client.execute_workflow(
|
||||
"workflow-id",
|
||||
input_data={"message": "Count to five"},
|
||||
stream=True,
|
||||
selected_outputs=["agent1.content"] # Use blockName.attribute format
|
||||
)
|
||||
|
||||
print("Workflow result:", result)
|
||||
except Exception as error:
|
||||
print("Error:", error)
|
||||
|
||||
execute_with_streaming()
|
||||
```
|
||||
|
||||
Die Streaming-Antwort folgt dem Server-Sent Events (SSE) Format:
|
||||
|
||||
```
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":"One"}
|
||||
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", two"}
|
||||
|
||||
data: {"event":"done","success":true,"output":{},"metadata":{"duration":610}}
|
||||
|
||||
data: [DONE]
|
||||
```
|
||||
|
||||
**Flask-Streaming-Beispiel:**
|
||||
|
||||
```python
|
||||
from flask import Flask, Response, stream_with_context
|
||||
import requests
|
||||
import json
|
||||
import os
|
||||
|
||||
app = Flask(__name__)
|
||||
|
||||
@app.route('/stream-workflow')
|
||||
def stream_workflow():
|
||||
"""Stream workflow execution to the client."""
|
||||
|
||||
def generate():
|
||||
response = requests.post(
|
||||
'https://sim.ai/api/workflows/WORKFLOW_ID/execute',
|
||||
headers={
|
||||
'Content-Type': 'application/json',
|
||||
'X-API-Key': os.getenv('SIM_API_KEY')
|
||||
},
|
||||
json={
|
||||
'message': 'Generate a story',
|
||||
'stream': True,
|
||||
'selectedOutputs': ['agent1.content']
|
||||
},
|
||||
stream=True
|
||||
)
|
||||
|
||||
for line in response.iter_lines():
|
||||
if line:
|
||||
decoded_line = line.decode('utf-8')
|
||||
if decoded_line.startswith('data: '):
|
||||
data = decoded_line[6:] # Remove 'data: ' prefix
|
||||
|
||||
if data == '[DONE]':
|
||||
break
|
||||
|
||||
try:
|
||||
parsed = json.loads(data)
|
||||
if 'chunk' in parsed:
|
||||
yield f"data: {json.dumps(parsed)}\n\n"
|
||||
elif parsed.get('event') == 'done':
|
||||
yield f"data: {json.dumps(parsed)}\n\n"
|
||||
print("Execution complete:", parsed.get('metadata'))
|
||||
except json.JSONDecodeError:
|
||||
pass
|
||||
|
||||
return Response(
|
||||
stream_with_context(generate()),
|
||||
mimetype='text/event-stream'
|
||||
)
|
||||
|
||||
if __name__ == '__main__':
|
||||
app.run(debug=True)
|
||||
```
|
||||
|
||||
### Umgebungskonfiguration
|
||||
|
||||
Konfigurieren Sie den Client mit Umgebungsvariablen:
|
||||
Konfiguriere den Client mit Umgebungsvariablen:
|
||||
|
||||
<Tabs items={['Development', 'Production']}>
|
||||
<Tab value="Development">
|
||||
@@ -352,8 +702,8 @@ Konfigurieren Sie den Client mit Umgebungsvariablen:
|
||||
|
||||
# Development configuration
|
||||
client = SimStudioClient(
|
||||
api_key=os.getenv("SIMSTUDIO_API_KEY"),
|
||||
base_url=os.getenv("SIMSTUDIO_BASE_URL", "https://sim.ai")
|
||||
api_key=os.getenv("SIM_API_KEY")
|
||||
base_url=os.getenv("SIM_BASE_URL", "https://sim.ai")
|
||||
)
|
||||
```
|
||||
|
||||
@@ -365,13 +715,13 @@ Konfigurieren Sie den Client mit Umgebungsvariablen:
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
# Production configuration with error handling
|
||||
api_key = os.getenv("SIMSTUDIO_API_KEY")
|
||||
api_key = os.getenv("SIM_API_KEY")
|
||||
if not api_key:
|
||||
raise ValueError("SIMSTUDIO_API_KEY environment variable is required")
|
||||
raise ValueError("SIM_API_KEY environment variable is required")
|
||||
|
||||
client = SimStudioClient(
|
||||
api_key=api_key,
|
||||
base_url=os.getenv("SIMSTUDIO_BASE_URL", "https://sim.ai")
|
||||
base_url=os.getenv("SIM_BASE_URL", "https://sim.ai")
|
||||
)
|
||||
```
|
||||
|
||||
@@ -382,19 +732,19 @@ Konfigurieren Sie den Client mit Umgebungsvariablen:
|
||||
|
||||
<Steps>
|
||||
<Step title="Bei Sim anmelden">
|
||||
Navigieren Sie zu [Sim](https://sim.ai) und melden Sie sich bei Ihrem Konto an.
|
||||
Navigiere zu [Sim](https://sim.ai) und melde dich bei deinem Konto an.
|
||||
</Step>
|
||||
<Step title="Ihren Workflow öffnen">
|
||||
Navigieren Sie zu dem Workflow, den Sie programmatisch ausführen möchten.
|
||||
<Step title="Öffne deinen Workflow">
|
||||
Navigiere zu dem Workflow, den du programmatisch ausführen möchtest.
|
||||
</Step>
|
||||
<Step title="Ihren Workflow bereitstellen">
|
||||
Klicken Sie auf "Deploy", um Ihren Workflow bereitzustellen, falls dies noch nicht geschehen ist.
|
||||
<Step title="Deploye deinen Workflow">
|
||||
Klicke auf "Deploy", um deinen Workflow zu deployen, falls dies noch nicht geschehen ist.
|
||||
</Step>
|
||||
<Step title="API-Schlüssel erstellen oder auswählen">
|
||||
Wählen Sie während des Bereitstellungsprozesses einen API-Schlüssel aus oder erstellen Sie einen neuen.
|
||||
<Step title="Erstelle oder wähle einen API-Schlüssel">
|
||||
Wähle während des Deployment-Prozesses einen API-Schlüssel aus oder erstelle einen neuen.
|
||||
</Step>
|
||||
<Step title="API-Schlüssel kopieren">
|
||||
Kopieren Sie den API-Schlüssel zur Verwendung in Ihrer Python-Anwendung.
|
||||
<Step title="Kopiere den API-Schlüssel">
|
||||
Kopiere den API-Schlüssel zur Verwendung in deiner Python-Anwendung.
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
|
||||
@@ -7,10 +7,10 @@ import { Card, Cards } from 'fumadocs-ui/components/card'
|
||||
import { Step, Steps } from 'fumadocs-ui/components/steps'
|
||||
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
|
||||
|
||||
Das offizielle TypeScript/JavaScript SDK für Sim bietet vollständige Typsicherheit und unterstützt sowohl Node.js- als auch Browser-Umgebungen, sodass Sie Workflows programmatisch aus Ihren Node.js-Anwendungen, Webanwendungen und anderen JavaScript-Umgebungen ausführen können. Alle Workflow-Ausführungen sind derzeit synchron.
|
||||
Das offizielle TypeScript/JavaScript SDK für Sim bietet vollständige Typsicherheit und unterstützt sowohl Node.js- als auch Browser-Umgebungen, sodass Sie Workflows programmatisch aus Ihren Node.js-Anwendungen, Webanwendungen und anderen JavaScript-Umgebungen ausführen können.
|
||||
|
||||
<Callout type="info">
|
||||
Das TypeScript SDK bietet vollständige Typsicherheit und unterstützt sowohl Node.js- als auch Browser-Umgebungen. Alle Workflow-Ausführungen sind derzeit synchron.
|
||||
Das TypeScript SDK bietet vollständige Typsicherheit, Unterstützung für asynchrone Ausführung, automatische Ratenbegrenzung mit exponentiellem Backoff und Nutzungsverfolgung.
|
||||
</Callout>
|
||||
|
||||
## Installation
|
||||
@@ -95,8 +95,13 @@ const result = await client.executeWorkflow('workflow-id', {
|
||||
- `options` (ExecutionOptions, optional):
|
||||
- `input` (any): Eingabedaten, die an den Workflow übergeben werden
|
||||
- `timeout` (number): Timeout in Millisekunden (Standard: 30000)
|
||||
- `stream` (boolean): Streaming-Antworten aktivieren (Standard: false)
|
||||
- `selectedOutputs` (string[]): Block-Ausgaben, die im `blockName.attribute`Format gestreamt werden sollen (z.B. `["agent1.content"]`)
|
||||
- `async` (boolean): Asynchron ausführen (Standard: false)
|
||||
|
||||
**Rückgabewert:** `Promise<WorkflowExecutionResult>`
|
||||
**Rückgabe:** `Promise<WorkflowExecutionResult | AsyncExecutionResult>`
|
||||
|
||||
Wenn `async: true`, wird sofort mit einer Task-ID zum Abfragen zurückgegeben. Andernfalls wird auf den Abschluss gewartet.
|
||||
|
||||
##### getWorkflowStatus()
|
||||
|
||||
@@ -110,7 +115,7 @@ console.log('Is deployed:', status.isDeployed);
|
||||
**Parameter:**
|
||||
- `workflowId` (string): Die ID des Workflows
|
||||
|
||||
**Rückgabewert:** `Promise<WorkflowStatus>`
|
||||
**Rückgabe:** `Promise<WorkflowStatus>`
|
||||
|
||||
##### validateWorkflow()
|
||||
|
||||
@@ -126,34 +131,123 @@ if (isReady) {
|
||||
**Parameter:**
|
||||
- `workflowId` (string): Die ID des Workflows
|
||||
|
||||
**Rückgabewert:** `Promise<boolean>`
|
||||
**Rückgabe:** `Promise<boolean>`
|
||||
|
||||
##### executeWorkflowSync()
|
||||
##### getJobStatus()
|
||||
|
||||
<Callout type="info">
|
||||
Derzeit ist diese Methode identisch mit `executeWorkflow()`, da alle Ausführungen synchron sind. Diese Methode wird für zukünftige Kompatibilität bereitgestellt, wenn asynchrone Ausführung hinzugefügt wird.
|
||||
</Callout>
|
||||
|
||||
Einen Workflow ausführen (derzeit synchron, identisch mit `executeWorkflow()`).
|
||||
Den Status einer asynchronen Job-Ausführung abrufen.
|
||||
|
||||
```typescript
|
||||
const result = await client.executeWorkflowSync('workflow-id', {
|
||||
input: { data: 'some input' },
|
||||
timeout: 60000
|
||||
const status = await client.getJobStatus('task-id-from-async-execution');
|
||||
console.log('Status:', status.status); // 'queued', 'processing', 'completed', 'failed'
|
||||
if (status.status === 'completed') {
|
||||
console.log('Output:', status.output);
|
||||
}
|
||||
```
|
||||
|
||||
**Parameter:**
|
||||
- `taskId` (string): Die Task-ID, die von der asynchronen Ausführung zurückgegeben wurde
|
||||
|
||||
**Rückgabe:** `Promise<JobStatus>`
|
||||
|
||||
**Antwortfelder:**
|
||||
- `success` (boolean): Ob die Anfrage erfolgreich war
|
||||
- `taskId` (string): Die Task-ID
|
||||
- `status` (string): Einer der Werte `'queued'`, `'processing'`, `'completed'`, `'failed'`, `'cancelled'`
|
||||
- `metadata` (object): Enthält `startedAt`, `completedAt` und `duration`
|
||||
- `output` (any, optional): Die Workflow-Ausgabe (wenn abgeschlossen)
|
||||
- `error` (any, optional): Fehlerdetails (wenn fehlgeschlagen)
|
||||
- `estimatedDuration` (number, optional): Geschätzte Dauer in Millisekunden (wenn in Bearbeitung/in der Warteschlange)
|
||||
|
||||
##### executeWithRetry()
|
||||
|
||||
Führt einen Workflow mit automatischer Wiederholung bei Ratenlimitfehlern unter Verwendung von exponentiellem Backoff aus.
|
||||
|
||||
```typescript
|
||||
const result = await client.executeWithRetry('workflow-id', {
|
||||
input: { message: 'Hello' },
|
||||
timeout: 30000
|
||||
}, {
|
||||
maxRetries: 3, // Maximum number of retries
|
||||
initialDelay: 1000, // Initial delay in ms (1 second)
|
||||
maxDelay: 30000, // Maximum delay in ms (30 seconds)
|
||||
backoffMultiplier: 2 // Exponential backoff multiplier
|
||||
});
|
||||
```
|
||||
|
||||
**Parameter:**
|
||||
- `workflowId` (string): Die ID des auszuführenden Workflows
|
||||
- `options` (ExecutionOptions, optional):
|
||||
- `input` (any): Eingabedaten, die an den Workflow übergeben werden
|
||||
- `timeout` (number): Timeout für die initiale Anfrage in Millisekunden
|
||||
- `options` (ExecutionOptions, optional): Gleich wie `executeWorkflow()`
|
||||
- `retryOptions` (RetryOptions, optional):
|
||||
- `maxRetries` (number): Maximale Anzahl von Wiederholungen (Standard: 3)
|
||||
- `initialDelay` (number): Anfängliche Verzögerung in ms (Standard: 1000)
|
||||
- `maxDelay` (number): Maximale Verzögerung in ms (Standard: 30000)
|
||||
- `backoffMultiplier` (number): Backoff-Multiplikator (Standard: 2)
|
||||
|
||||
**Rückgabewert:** `Promise<WorkflowExecutionResult>`
|
||||
**Rückgabewert:** `Promise<WorkflowExecutionResult | AsyncExecutionResult>`
|
||||
|
||||
Die Wiederholungslogik verwendet exponentiellen Backoff (1s → 2s → 4s → 8s...) mit ±25% Jitter, um den Thundering-Herd-Effekt zu vermeiden. Wenn die API einen `retry-after`Header bereitstellt, wird dieser stattdessen verwendet.
|
||||
|
||||
##### getRateLimitInfo()
|
||||
|
||||
Ruft die aktuellen Ratenlimit-Informationen aus der letzten API-Antwort ab.
|
||||
|
||||
```typescript
|
||||
const rateLimitInfo = client.getRateLimitInfo();
|
||||
if (rateLimitInfo) {
|
||||
console.log('Limit:', rateLimitInfo.limit);
|
||||
console.log('Remaining:', rateLimitInfo.remaining);
|
||||
console.log('Reset:', new Date(rateLimitInfo.reset * 1000));
|
||||
}
|
||||
```
|
||||
|
||||
**Rückgabewert:** `RateLimitInfo | null`
|
||||
|
||||
##### getUsageLimits()
|
||||
|
||||
Ruft aktuelle Nutzungslimits und Kontingentinformationen für Ihr Konto ab.
|
||||
|
||||
```typescript
|
||||
const limits = await client.getUsageLimits();
|
||||
console.log('Sync requests remaining:', limits.rateLimit.sync.remaining);
|
||||
console.log('Async requests remaining:', limits.rateLimit.async.remaining);
|
||||
console.log('Current period cost:', limits.usage.currentPeriodCost);
|
||||
console.log('Plan:', limits.usage.plan);
|
||||
```
|
||||
|
||||
**Rückgabewert:** `Promise<UsageLimits>`
|
||||
|
||||
**Antwortstruktur:**
|
||||
|
||||
```typescript
|
||||
{
|
||||
success: boolean
|
||||
rateLimit: {
|
||||
sync: {
|
||||
isLimited: boolean
|
||||
limit: number
|
||||
remaining: number
|
||||
resetAt: string
|
||||
}
|
||||
async: {
|
||||
isLimited: boolean
|
||||
limit: number
|
||||
remaining: number
|
||||
resetAt: string
|
||||
}
|
||||
authType: string // 'api' or 'manual'
|
||||
}
|
||||
usage: {
|
||||
currentPeriodCost: number
|
||||
limit: number
|
||||
plan: string // e.g., 'free', 'pro'
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
##### setApiKey()
|
||||
|
||||
Den API-Schlüssel aktualisieren.
|
||||
Aktualisiert den API-Schlüssel.
|
||||
|
||||
```typescript
|
||||
client.setApiKey('new-api-key');
|
||||
@@ -161,7 +255,7 @@ client.setApiKey('new-api-key');
|
||||
|
||||
##### setBaseUrl()
|
||||
|
||||
Die Basis-URL aktualisieren.
|
||||
Aktualisiert die Basis-URL.
|
||||
|
||||
```typescript
|
||||
client.setBaseUrl('https://my-custom-domain.com');
|
||||
@@ -187,6 +281,20 @@ interface WorkflowExecutionResult {
|
||||
}
|
||||
```
|
||||
|
||||
### AsyncExecutionResult
|
||||
|
||||
```typescript
|
||||
interface AsyncExecutionResult {
|
||||
success: boolean;
|
||||
taskId: string;
|
||||
status: 'queued';
|
||||
createdAt: string;
|
||||
links: {
|
||||
status: string; // e.g., "/api/jobs/{taskId}"
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
### WorkflowStatus
|
||||
|
||||
```typescript
|
||||
@@ -198,6 +306,45 @@ interface WorkflowStatus {
|
||||
}
|
||||
```
|
||||
|
||||
### RateLimitInfo
|
||||
|
||||
```typescript
|
||||
interface RateLimitInfo {
|
||||
limit: number;
|
||||
remaining: number;
|
||||
reset: number;
|
||||
retryAfter?: number;
|
||||
}
|
||||
```
|
||||
|
||||
### UsageLimits
|
||||
|
||||
```typescript
|
||||
interface UsageLimits {
|
||||
success: boolean;
|
||||
rateLimit: {
|
||||
sync: {
|
||||
isLimited: boolean;
|
||||
limit: number;
|
||||
remaining: number;
|
||||
resetAt: string;
|
||||
};
|
||||
async: {
|
||||
isLimited: boolean;
|
||||
limit: number;
|
||||
remaining: number;
|
||||
resetAt: string;
|
||||
};
|
||||
authType: string;
|
||||
};
|
||||
usage: {
|
||||
currentPeriodCost: number;
|
||||
limit: number;
|
||||
plan: string;
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
### SimStudioError
|
||||
|
||||
```typescript
|
||||
@@ -207,6 +354,13 @@ class SimStudioError extends Error {
|
||||
}
|
||||
```
|
||||
|
||||
**Häufige Fehlercodes:**
|
||||
- `UNAUTHORIZED`: Ungültiger API-Schlüssel
|
||||
- `TIMEOUT`: Zeitüberschreitung der Anfrage
|
||||
- `RATE_LIMIT_EXCEEDED`: Ratengrenze überschritten
|
||||
- `USAGE_LIMIT_EXCEEDED`: Nutzungsgrenze überschritten
|
||||
- `EXECUTION_ERROR`: Workflow-Ausführung fehlgeschlagen
|
||||
|
||||
## Beispiele
|
||||
|
||||
### Grundlegende Workflow-Ausführung
|
||||
@@ -230,7 +384,7 @@ class SimStudioError extends Error {
|
||||
import { SimStudioClient } from 'simstudio-ts-sdk';
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.SIMSTUDIO_API_KEY!
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
async function runWorkflow() {
|
||||
@@ -271,7 +425,7 @@ Behandeln Sie verschiedene Fehlertypen, die während der Workflow-Ausführung au
|
||||
import { SimStudioClient, SimStudioError } from 'simstudio-ts-sdk';
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.SIMSTUDIO_API_KEY!
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
async function executeWithErrorHandling() {
|
||||
@@ -315,14 +469,14 @@ Konfigurieren Sie den Client mit Umgebungsvariablen:
|
||||
import { SimStudioClient } from 'simstudio-ts-sdk';
|
||||
|
||||
// Development configuration
|
||||
const apiKey = process.env.SIMSTUDIO_API_KEY;
|
||||
const apiKey = process.env.SIM_API_KEY;
|
||||
if (!apiKey) {
|
||||
throw new Error('SIMSTUDIO_API_KEY environment variable is required');
|
||||
throw new Error('SIM_API_KEY environment variable is required');
|
||||
}
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey,
|
||||
baseUrl: process.env.SIMSTUDIO_BASE_URL // optional
|
||||
baseUrl: process.env.SIM_BASE_URL // optional
|
||||
});
|
||||
```
|
||||
|
||||
@@ -333,14 +487,14 @@ Konfigurieren Sie den Client mit Umgebungsvariablen:
|
||||
import { SimStudioClient } from 'simstudio-ts-sdk';
|
||||
|
||||
// Production configuration with validation
|
||||
const apiKey = process.env.SIMSTUDIO_API_KEY;
|
||||
const apiKey = process.env.SIM_API_KEY;
|
||||
if (!apiKey) {
|
||||
throw new Error('SIMSTUDIO_API_KEY environment variable is required');
|
||||
throw new Error('SIM_API_KEY environment variable is required');
|
||||
}
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey,
|
||||
baseUrl: process.env.SIMSTUDIO_BASE_URL || 'https://sim.ai'
|
||||
baseUrl: process.env.SIM_BASE_URL || 'https://sim.ai'
|
||||
});
|
||||
```
|
||||
|
||||
@@ -357,7 +511,7 @@ import { SimStudioClient } from 'simstudio-ts-sdk';
|
||||
|
||||
const app = express();
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.SIMSTUDIO_API_KEY!
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
app.use(express.json());
|
||||
@@ -399,7 +553,7 @@ import { NextApiRequest, NextApiResponse } from 'next';
|
||||
import { SimStudioClient } from 'simstudio-ts-sdk';
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.SIMSTUDIO_API_KEY!
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
export default async function handler(
|
||||
@@ -467,16 +621,16 @@ document.getElementById('executeBtn')?.addEventListener('click', executeClientSi
|
||||
Bei der Verwendung des SDK im Browser sollten Sie darauf achten, keine sensiblen API-Schlüssel offenzulegen. Erwägen Sie die Verwendung eines Backend-Proxys oder öffentlicher API-Schlüssel mit eingeschränkten Berechtigungen.
|
||||
</Callout>
|
||||
|
||||
### React Hook Beispiel
|
||||
### React Hook-Beispiel
|
||||
|
||||
Erstellen Sie einen benutzerdefinierten React Hook für die Workflow-Ausführung:
|
||||
Erstellen eines benutzerdefinierten React-Hooks für die Workflow-Ausführung:
|
||||
|
||||
```typescript
|
||||
import { useState, useCallback } from 'react';
|
||||
import { SimStudioClient, WorkflowExecutionResult } from 'simstudio-ts-sdk';
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.NEXT_PUBLIC_SIMSTUDIO_API_KEY!
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
interface UseWorkflowResult {
|
||||
@@ -532,7 +686,7 @@ function WorkflowComponent() {
|
||||
<button onClick={handleExecute} disabled={loading}>
|
||||
{loading ? 'Executing...' : 'Execute Workflow'}
|
||||
</button>
|
||||
|
||||
|
||||
{error && <div>Error: {error.message}</div>}
|
||||
{result && (
|
||||
<div>
|
||||
@@ -545,38 +699,267 @@ function WorkflowComponent() {
|
||||
}
|
||||
```
|
||||
|
||||
## Ihren API-Schlüssel erhalten
|
||||
### Asynchrone Workflow-Ausführung
|
||||
|
||||
Führen Sie Workflows asynchron für lang laufende Aufgaben aus:
|
||||
|
||||
```typescript
|
||||
import { SimStudioClient, AsyncExecutionResult } from 'simstudio-ts-sdk';
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
async function executeAsync() {
|
||||
try {
|
||||
// Start async execution
|
||||
const result = await client.executeWorkflow('workflow-id', {
|
||||
input: { data: 'large dataset' },
|
||||
async: true // Execute asynchronously
|
||||
});
|
||||
|
||||
// Check if result is an async execution
|
||||
if ('taskId' in result) {
|
||||
console.log('Task ID:', result.taskId);
|
||||
console.log('Status endpoint:', result.links.status);
|
||||
|
||||
// Poll for completion
|
||||
let status = await client.getJobStatus(result.taskId);
|
||||
|
||||
while (status.status === 'queued' || status.status === 'processing') {
|
||||
console.log('Current status:', status.status);
|
||||
await new Promise(resolve => setTimeout(resolve, 2000)); // Wait 2 seconds
|
||||
status = await client.getJobStatus(result.taskId);
|
||||
}
|
||||
|
||||
if (status.status === 'completed') {
|
||||
console.log('Workflow completed!');
|
||||
console.log('Output:', status.output);
|
||||
console.log('Duration:', status.metadata.duration);
|
||||
} else {
|
||||
console.error('Workflow failed:', status.error);
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
console.error('Error:', error);
|
||||
}
|
||||
}
|
||||
|
||||
executeAsync();
|
||||
```
|
||||
|
||||
### Rate-Limiting und Wiederholungsversuche
|
||||
|
||||
Automatische Behandlung von Rate-Limits mit exponentiellem Backoff:
|
||||
|
||||
```typescript
|
||||
import { SimStudioClient, SimStudioError } from 'simstudio-ts-sdk';
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
async function executeWithRetryHandling() {
|
||||
try {
|
||||
// Automatically retries on rate limit
|
||||
const result = await client.executeWithRetry('workflow-id', {
|
||||
input: { message: 'Process this' }
|
||||
}, {
|
||||
maxRetries: 5,
|
||||
initialDelay: 1000,
|
||||
maxDelay: 60000,
|
||||
backoffMultiplier: 2
|
||||
});
|
||||
|
||||
console.log('Success:', result);
|
||||
} catch (error) {
|
||||
if (error instanceof SimStudioError && error.code === 'RATE_LIMIT_EXCEEDED') {
|
||||
console.error('Rate limit exceeded after all retries');
|
||||
|
||||
// Check rate limit info
|
||||
const rateLimitInfo = client.getRateLimitInfo();
|
||||
if (rateLimitInfo) {
|
||||
console.log('Rate limit resets at:', new Date(rateLimitInfo.reset * 1000));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Nutzungsüberwachung
|
||||
|
||||
Überwachen Sie Ihre Kontonutzung und -limits:
|
||||
|
||||
```typescript
|
||||
import { SimStudioClient } from 'simstudio-ts-sdk';
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
async function checkUsage() {
|
||||
try {
|
||||
const limits = await client.getUsageLimits();
|
||||
|
||||
console.log('=== Rate Limits ===');
|
||||
console.log('Sync requests:');
|
||||
console.log(' Limit:', limits.rateLimit.sync.limit);
|
||||
console.log(' Remaining:', limits.rateLimit.sync.remaining);
|
||||
console.log(' Resets at:', limits.rateLimit.sync.resetAt);
|
||||
console.log(' Is limited:', limits.rateLimit.sync.isLimited);
|
||||
|
||||
console.log('\nAsync requests:');
|
||||
console.log(' Limit:', limits.rateLimit.async.limit);
|
||||
console.log(' Remaining:', limits.rateLimit.async.remaining);
|
||||
console.log(' Resets at:', limits.rateLimit.async.resetAt);
|
||||
console.log(' Is limited:', limits.rateLimit.async.isLimited);
|
||||
|
||||
console.log('\n=== Usage ===');
|
||||
console.log('Current period cost:
|
||||
|
||||
### Streaming Workflow Execution
|
||||
|
||||
Execute workflows with real-time streaming responses:
|
||||
|
||||
```typescript
|
||||
import { SimStudioClient } from 'simstudio-ts-sdk';
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
async function executeWithStreaming() {
|
||||
try {
|
||||
// Streaming für bestimmte Block-Ausgaben aktivieren
|
||||
const result = await client.executeWorkflow('workflow-id', {
|
||||
input: { message: 'Count to five' },
|
||||
stream: true,
|
||||
selectedOutputs: ['agent1.content'] // Format blockName.attribute verwenden
|
||||
});
|
||||
|
||||
console.log('Workflow-Ergebnis:', result);
|
||||
} catch (error) {
|
||||
console.error('Fehler:', error);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
The streaming response follows the Server-Sent Events (SSE) format:
|
||||
|
||||
```
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":"One"}
|
||||
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", zwei"}
|
||||
|
||||
data: {"event":"done","success":true,"output":{},"metadata":{"duration":610}}
|
||||
|
||||
data: [DONE]
|
||||
```
|
||||
|
||||
**React Streaming Example:**
|
||||
|
||||
```typescript
|
||||
import { useState, useEffect } from 'react';
|
||||
|
||||
function StreamingWorkflow() {
|
||||
const [output, setOutput] = useState('');
|
||||
const [loading, setLoading] = useState(false);
|
||||
|
||||
const executeStreaming = async () => {
|
||||
setLoading(true);
|
||||
setOutput('');
|
||||
|
||||
// WICHTIG: Führen Sie diesen API-Aufruf von Ihrem Backend-Server aus, nicht vom Browser
|
||||
// Setzen Sie niemals Ihren API-Schlüssel im Client-seitigen Code frei
|
||||
const response = await fetch('https://sim.ai/api/workflows/WORKFLOW_ID/execute', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'X-API-Key': process.env.SIM_API_KEY! // Nur serverseitige Umgebungsvariable
|
||||
},
|
||||
body: JSON.stringify({
|
||||
message: 'Generate a story',
|
||||
stream: true,
|
||||
selectedOutputs: ['agent1.content']
|
||||
})
|
||||
});
|
||||
|
||||
const reader = response.body?.getReader();
|
||||
const decoder = new TextDecoder();
|
||||
|
||||
while (reader) {
|
||||
const { done, value } = await reader.read();
|
||||
if (done) break;
|
||||
|
||||
const chunk = decoder.decode(value);
|
||||
const lines = chunk.split('\n\n');
|
||||
|
||||
for (const line of lines) {
|
||||
if (line.startsWith('data: ')) {
|
||||
const data = line.slice(6);
|
||||
if (data === '[DONE]') {
|
||||
setLoading(false);
|
||||
break;
|
||||
}
|
||||
|
||||
try {
|
||||
const parsed = JSON.parse(data);
|
||||
if (parsed.chunk) {
|
||||
setOutput(prev => prev + parsed.chunk);
|
||||
} else if (parsed.event === 'done') {
|
||||
console.log('Ausführung abgeschlossen:', parsed.metadata);
|
||||
}
|
||||
} catch (e) {
|
||||
// Ungültiges JSON überspringen
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
return (
|
||||
<div>
|
||||
<button onClick={executeStreaming} disabled={loading}>
|
||||
{loading ? 'Generiere...' : 'Streaming starten'}
|
||||
</button>
|
||||
<div style={{ whiteSpace: 'pre-wrap' }}>{output}</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
```
|
||||
|
||||
## Getting Your API Key
|
||||
|
||||
<Steps>
|
||||
<Step title="Bei Sim anmelden">
|
||||
Navigieren Sie zu [Sim](https://sim.ai) und melden Sie sich bei Ihrem Konto an.
|
||||
<Step title="Log in to Sim">
|
||||
Navigate to [Sim](https://sim.ai) and log in to your account.
|
||||
</Step>
|
||||
<Step title="Öffnen Sie Ihren Workflow">
|
||||
Navigieren Sie zu dem Workflow, den Sie programmatisch ausführen möchten.
|
||||
<Step title="Open your workflow">
|
||||
Navigate to the workflow you want to execute programmatically.
|
||||
</Step>
|
||||
<Step title="Deployen Sie Ihren Workflow">
|
||||
Klicken Sie auf "Deploy", um Ihren Workflow zu deployen, falls dies noch nicht geschehen ist.
|
||||
<Step title="Deploy your workflow">
|
||||
Click on "Deploy" to deploy your workflow if it hasn't been deployed yet.
|
||||
</Step>
|
||||
<Step title="Erstellen oder wählen Sie einen API-Schlüssel">
|
||||
Wählen Sie während des Deployment-Prozesses einen API-Schlüssel aus oder erstellen Sie einen neuen.
|
||||
<Step title="Create or select an API key">
|
||||
During the deployment process, select or create an API key.
|
||||
</Step>
|
||||
<Step title="Kopieren Sie den API-Schlüssel">
|
||||
Kopieren Sie den API-Schlüssel zur Verwendung in Ihrer TypeScript/JavaScript-Anwendung.
|
||||
<Step title="Copy the API key">
|
||||
Copy the API key to use in your TypeScript/JavaScript application.
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
<Callout type="warning">
|
||||
Halten Sie Ihren API-Schlüssel sicher und committen Sie ihn niemals in die Versionskontrolle. Verwenden Sie Umgebungsvariablen oder sicheres Konfigurationsmanagement.
|
||||
Keep your API key secure and never commit it to version control. Use environment variables or secure configuration management.
|
||||
</Callout>
|
||||
|
||||
## Anforderungen
|
||||
## Requirements
|
||||
|
||||
- Node.js 16+
|
||||
- TypeScript 5.0+ (für TypeScript-Projekte)
|
||||
- TypeScript 5.0+ (for TypeScript projects)
|
||||
|
||||
## TypeScript-Unterstützung
|
||||
## TypeScript Support
|
||||
|
||||
Das SDK ist in TypeScript geschrieben und bietet vollständige Typsicherheit:
|
||||
The SDK is written in TypeScript and provides full type safety:
|
||||
|
||||
```typescript
|
||||
import {
|
||||
@@ -586,22 +969,22 @@ import {
|
||||
SimStudioError
|
||||
} from 'simstudio-ts-sdk';
|
||||
|
||||
// Type-safe client initialization
|
||||
// Typsichere Client-Initialisierung
|
||||
const client: SimStudioClient = new SimStudioClient({
|
||||
apiKey: process.env.SIMSTUDIO_API_KEY!
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
// Type-safe workflow execution
|
||||
// Typsichere Workflow-Ausführung
|
||||
const result: WorkflowExecutionResult = await client.executeWorkflow('workflow-id', {
|
||||
input: {
|
||||
message: 'Hello, TypeScript!'
|
||||
}
|
||||
});
|
||||
|
||||
// Type-safe status checking
|
||||
// Typsichere Statusprüfung
|
||||
const status: WorkflowStatus = await client.getWorkflowStatus('workflow-id');
|
||||
```
|
||||
|
||||
## Lizenz
|
||||
## License
|
||||
|
||||
Apache-2.0
|
||||
@@ -38,14 +38,92 @@ curl -X POST \
|
||||
|
||||
Erfolgreiche Antworten geben das serialisierte Ausführungsergebnis vom Executor zurück. Fehler zeigen Validierungs-, Authentifizierungs- oder Workflow-Fehler an.
|
||||
|
||||
## Ausgabe-Referenz
|
||||
## Streaming-Antworten
|
||||
|
||||
Aktivieren Sie Echtzeit-Streaming, um Workflow-Ausgaben zu erhalten, während sie zeichen-für-zeichen generiert werden. Dies ist nützlich, um KI-Antworten progressiv für Benutzer anzuzeigen.
|
||||
|
||||
### Anfrageparameter
|
||||
|
||||
Fügen Sie diese Parameter hinzu, um Streaming zu aktivieren:
|
||||
|
||||
- `stream` - Auf `true` setzen, um Server-Sent Events (SSE) Streaming zu aktivieren
|
||||
- `selectedOutputs` - Array von Block-Ausgaben zum Streamen (z.B. `["agent1.content"]`)
|
||||
|
||||
### Block-Ausgabeformat
|
||||
|
||||
Verwenden Sie das `blockName.attribute` Format, um anzugeben, welche Block-Ausgaben gestreamt werden sollen:
|
||||
- Format: `"blockName.attribute"` (z.B. Wenn Sie den Inhalt des Agent 1-Blocks streamen möchten, würden Sie `"agent1.content"` verwenden)
|
||||
- Blocknamen sind nicht case-sensitive und Leerzeichen werden ignoriert
|
||||
|
||||
### Beispielanfrage
|
||||
|
||||
```bash
|
||||
curl -X POST \
|
||||
https://sim.ai/api/workflows/WORKFLOW_ID/execute \
|
||||
-H 'Content-Type: application/json' \
|
||||
-H 'X-API-Key: YOUR_KEY' \
|
||||
-d '{
|
||||
"message": "Count to five",
|
||||
"stream": true,
|
||||
"selectedOutputs": ["agent1.content"]
|
||||
}'
|
||||
```
|
||||
|
||||
### Antwortformat
|
||||
|
||||
Streaming-Antworten verwenden das Server-Sent Events (SSE) Format:
|
||||
|
||||
```
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":"One"}
|
||||
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", two"}
|
||||
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", three"}
|
||||
|
||||
data: {"event":"done","success":true,"output":{},"metadata":{"duration":610}}
|
||||
|
||||
data: [DONE]
|
||||
```
|
||||
|
||||
Jedes Ereignis enthält:
|
||||
- **Streaming-Chunks**: `{"blockId": "...", "chunk": "text"}` - Echtzeit-Text während er generiert wird
|
||||
- **Abschlussereignis**: `{"event": "done", ...}` - Ausführungsmetadaten und vollständige Ergebnisse
|
||||
- **Terminator**: `[DONE]` - Signalisiert das Ende des Streams
|
||||
|
||||
### Streaming mehrerer Blöcke
|
||||
|
||||
Wenn `selectedOutputs` mehrere Blöcke enthält, zeigt jeder Chunk an, welcher Block ihn erzeugt hat:
|
||||
|
||||
```bash
|
||||
curl -X POST \
|
||||
https://sim.ai/api/workflows/WORKFLOW_ID/execute \
|
||||
-H 'Content-Type: application/json' \
|
||||
-H 'X-API-Key: YOUR_KEY' \
|
||||
-d '{
|
||||
"message": "Process this request",
|
||||
"stream": true,
|
||||
"selectedOutputs": ["agent1.content", "agent2.content"]
|
||||
}'
|
||||
```
|
||||
|
||||
Das Feld `blockId` in jedem Chunk ermöglicht es Ihnen, die Ausgabe zum richtigen UI-Element zu leiten:
|
||||
|
||||
```
|
||||
data: {"blockId":"agent1-uuid","chunk":"Processing..."}
|
||||
|
||||
data: {"blockId":"agent2-uuid","chunk":"Analyzing..."}
|
||||
|
||||
data: {"blockId":"agent1-uuid","chunk":" complete"}
|
||||
```
|
||||
|
||||
## Ausgabereferenz
|
||||
|
||||
| Referenz | Beschreibung |
|
||||
|-----------|-------------|
|
||||
| `<api.field>` | Im Eingabeformat definiertes Feld |
|
||||
| `<api.input>` | Gesamter strukturierter Anfragekörper |
|
||||
|
||||
Wenn kein Eingabeformat definiert ist, stellt der Executor das rohe JSON nur unter `<api.input>` bereit.
|
||||
Wenn kein Eingabeformat definiert ist, stellt der Executor das rohe JSON nur unter `<api.input>` zur Verfügung.
|
||||
|
||||
<Callout type="warning">
|
||||
Ein Workflow kann nur einen API-Trigger enthalten. Veröffentlichen Sie nach Änderungen eine neue Bereitstellung, damit der Endpunkt aktuell bleibt.
|
||||
|
||||
@@ -166,6 +166,38 @@ Different subscription plans have different usage limits:
|
||||
| **Team** | $500 (pooled) | 50 sync, 100 async |
|
||||
| **Enterprise** | Custom | Custom |
|
||||
|
||||
## Billing Model
|
||||
|
||||
Sim uses a **base subscription + overage** billing model:
|
||||
|
||||
### How It Works
|
||||
|
||||
**Pro Plan ($20/month):**
|
||||
- Monthly subscription includes $20 of usage
|
||||
- Usage under $20 → No additional charges
|
||||
- Usage over $20 → Pay the overage at month end
|
||||
- Example: $35 usage = $20 (subscription) + $15 (overage)
|
||||
|
||||
**Team Plan ($40/seat/month):**
|
||||
- Pooled usage across all team members
|
||||
- Overage calculated from total team usage
|
||||
- Organization owner receives one bill
|
||||
|
||||
**Enterprise Plans:**
|
||||
- Fixed monthly price, no overages
|
||||
- Custom usage limits per agreement
|
||||
|
||||
### Threshold Billing
|
||||
|
||||
When unbilled overage reaches $50, Sim automatically bills the full unbilled amount.
|
||||
|
||||
**Example:**
|
||||
- Day 10: $70 overage → Bill $70 immediately
|
||||
- Day 15: Additional $35 usage ($105 total) → Already billed, no action
|
||||
- Day 20: Another $50 usage ($155 total, $85 unbilled) → Bill $85 immediately
|
||||
|
||||
This spreads large overage charges throughout the month instead of one large bill at period end.
|
||||
|
||||
## Cost Management Best Practices
|
||||
|
||||
1. **Monitor Regularly**: Check your usage dashboard frequently to avoid surprises
|
||||
|
||||
@@ -10,7 +10,7 @@ import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
|
||||
The official Python SDK for Sim allows you to execute workflows programmatically from your Python applications using the official Python SDK.
|
||||
|
||||
<Callout type="info">
|
||||
The Python SDK supports Python 3.8+ and provides synchronous workflow execution. All workflow executions are currently synchronous.
|
||||
The Python SDK supports Python 3.8+ with async execution support, automatic rate limiting with exponential backoff, and usage tracking.
|
||||
</Callout>
|
||||
|
||||
## Installation
|
||||
@@ -74,8 +74,13 @@ result = client.execute_workflow(
|
||||
- `workflow_id` (str): The ID of the workflow to execute
|
||||
- `input_data` (dict, optional): Input data to pass to the workflow
|
||||
- `timeout` (float, optional): Timeout in seconds (default: 30.0)
|
||||
- `stream` (bool, optional): Enable streaming responses (default: False)
|
||||
- `selected_outputs` (list[str], optional): Block outputs to stream in `blockName.attribute` format (e.g., `["agent1.content"]`)
|
||||
- `async_execution` (bool, optional): Execute asynchronously (default: False)
|
||||
|
||||
**Returns:** `WorkflowExecutionResult`
|
||||
**Returns:** `WorkflowExecutionResult | AsyncExecutionResult`
|
||||
|
||||
When `async_execution=True`, returns immediately with a task ID for polling. Otherwise, waits for completion.
|
||||
|
||||
##### get_workflow_status()
|
||||
|
||||
@@ -107,28 +112,117 @@ if is_ready:
|
||||
|
||||
**Returns:** `bool`
|
||||
|
||||
##### execute_workflow_sync()
|
||||
##### get_job_status()
|
||||
|
||||
<Callout type="info">
|
||||
Currently, this method is identical to `execute_workflow()` since all executions are synchronous. This method is provided for future compatibility when asynchronous execution is added.
|
||||
</Callout>
|
||||
|
||||
Execute a workflow (currently synchronous, same as `execute_workflow()`).
|
||||
Get the status of an async job execution.
|
||||
|
||||
```python
|
||||
result = client.execute_workflow_sync(
|
||||
status = client.get_job_status("task-id-from-async-execution")
|
||||
print("Status:", status["status"]) # 'queued', 'processing', 'completed', 'failed'
|
||||
if status["status"] == "completed":
|
||||
print("Output:", status["output"])
|
||||
```
|
||||
|
||||
**Parameters:**
|
||||
- `task_id` (str): The task ID returned from async execution
|
||||
|
||||
**Returns:** `Dict[str, Any]`
|
||||
|
||||
**Response fields:**
|
||||
- `success` (bool): Whether the request was successful
|
||||
- `taskId` (str): The task ID
|
||||
- `status` (str): One of `'queued'`, `'processing'`, `'completed'`, `'failed'`, `'cancelled'`
|
||||
- `metadata` (dict): Contains `startedAt`, `completedAt`, and `duration`
|
||||
- `output` (any, optional): The workflow output (when completed)
|
||||
- `error` (any, optional): Error details (when failed)
|
||||
- `estimatedDuration` (int, optional): Estimated duration in milliseconds (when processing/queued)
|
||||
|
||||
##### execute_with_retry()
|
||||
|
||||
Execute a workflow with automatic retry on rate limit errors using exponential backoff.
|
||||
|
||||
```python
|
||||
result = client.execute_with_retry(
|
||||
"workflow-id",
|
||||
input_data={"data": "some input"},
|
||||
timeout=60.0
|
||||
input_data={"message": "Hello"},
|
||||
timeout=30.0,
|
||||
max_retries=3, # Maximum number of retries
|
||||
initial_delay=1.0, # Initial delay in seconds
|
||||
max_delay=30.0, # Maximum delay in seconds
|
||||
backoff_multiplier=2.0 # Exponential backoff multiplier
|
||||
)
|
||||
```
|
||||
|
||||
**Parameters:**
|
||||
- `workflow_id` (str): The ID of the workflow to execute
|
||||
- `input_data` (dict, optional): Input data to pass to the workflow
|
||||
- `timeout` (float): Timeout for the initial request in seconds
|
||||
- `timeout` (float, optional): Timeout in seconds
|
||||
- `stream` (bool, optional): Enable streaming responses
|
||||
- `selected_outputs` (list, optional): Block outputs to stream
|
||||
- `async_execution` (bool, optional): Execute asynchronously
|
||||
- `max_retries` (int, optional): Maximum number of retries (default: 3)
|
||||
- `initial_delay` (float, optional): Initial delay in seconds (default: 1.0)
|
||||
- `max_delay` (float, optional): Maximum delay in seconds (default: 30.0)
|
||||
- `backoff_multiplier` (float, optional): Backoff multiplier (default: 2.0)
|
||||
|
||||
**Returns:** `WorkflowExecutionResult`
|
||||
**Returns:** `WorkflowExecutionResult | AsyncExecutionResult`
|
||||
|
||||
The retry logic uses exponential backoff (1s → 2s → 4s → 8s...) with ±25% jitter to prevent a thundering herd of simultaneous retries. If the API provides a `retry-after` header, it will be used instead.
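As a rough sketch of that schedule (the SDK's internal implementation may differ in detail), the delay before each retry could be computed like this:

```python
from typing import Optional
import random

# Illustrative sketch of the backoff schedule described above.
def backoff_delay(
    attempt: int,
    initial_delay: float = 1.0,
    max_delay: float = 30.0,
    backoff_multiplier: float = 2.0,
    retry_after: Optional[float] = None,
) -> float:
    """Seconds to wait before retry number `attempt` (0-based)."""
    if retry_after is not None:
        return retry_after  # honor the API's retry-after header when present
    delay = min(initial_delay * (backoff_multiplier ** attempt), max_delay)
    jitter = delay * random.uniform(-0.25, 0.25)  # ±25% jitter
    return delay + jitter

# Roughly 1s, 2s, 4s, 8s, 16s (plus jitter), capped at max_delay
print([round(backoff_delay(n), 2) for n in range(5)])
```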
|
||||
|
||||
##### get_rate_limit_info()
|
||||
|
||||
Get the current rate limit information from the last API response.
|
||||
|
||||
```python
|
||||
rate_limit_info = client.get_rate_limit_info()
|
||||
if rate_limit_info:
|
||||
print("Limit:", rate_limit_info.limit)
|
||||
print("Remaining:", rate_limit_info.remaining)
|
||||
print("Reset:", datetime.fromtimestamp(rate_limit_info.reset))
|
||||
```
|
||||
|
||||
**Returns:** `RateLimitInfo | None`
|
||||
|
||||
##### get_usage_limits()
|
||||
|
||||
Get current usage limits and quota information for your account.
|
||||
|
||||
```python
|
||||
limits = client.get_usage_limits()
|
||||
print("Sync requests remaining:", limits.rate_limit["sync"]["remaining"])
|
||||
print("Async requests remaining:", limits.rate_limit["async"]["remaining"])
|
||||
print("Current period cost:", limits.usage["currentPeriodCost"])
|
||||
print("Plan:", limits.usage["plan"])
|
||||
```
|
||||
|
||||
**Returns:** `UsageLimits`
|
||||
|
||||
**Response structure:**
|
||||
```python
|
||||
{
|
||||
"success": bool,
|
||||
"rateLimit": {
|
||||
"sync": {
|
||||
"isLimited": bool,
|
||||
"limit": int,
|
||||
"remaining": int,
|
||||
"resetAt": str
|
||||
},
|
||||
"async": {
|
||||
"isLimited": bool,
|
||||
"limit": int,
|
||||
"remaining": int,
|
||||
"resetAt": str
|
||||
},
|
||||
"authType": str # 'api' or 'manual'
|
||||
},
|
||||
"usage": {
|
||||
"currentPeriodCost": float,
|
||||
"limit": float,
|
||||
"plan": str # e.g., 'free', 'pro'
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
##### set_api_key()
|
||||
|
||||
@@ -170,6 +264,18 @@ class WorkflowExecutionResult:
|
||||
total_duration: Optional[float] = None
|
||||
```
|
||||
|
||||
### AsyncExecutionResult
|
||||
|
||||
```python
|
||||
@dataclass
|
||||
class AsyncExecutionResult:
|
||||
success: bool
|
||||
task_id: str
|
||||
status: str # 'queued'
|
||||
created_at: str
|
||||
links: Dict[str, str] # e.g., {"status": "/api/jobs/{taskId}"}
|
||||
```
|
||||
|
||||
### WorkflowStatus
|
||||
|
||||
```python
|
||||
@@ -181,6 +287,27 @@ class WorkflowStatus:
|
||||
needs_redeployment: bool = False
|
||||
```
|
||||
|
||||
### RateLimitInfo
|
||||
|
||||
```python
|
||||
@dataclass
|
||||
class RateLimitInfo:
|
||||
limit: int
|
||||
remaining: int
|
||||
reset: int
|
||||
retry_after: Optional[int] = None
|
||||
```
|
||||
|
||||
### UsageLimits
|
||||
|
||||
```python
|
||||
@dataclass
|
||||
class UsageLimits:
|
||||
success: bool
|
||||
rate_limit: Dict[str, Any]
|
||||
usage: Dict[str, Any]
|
||||
```
|
||||
|
||||
### SimStudioError
|
||||
|
||||
```python
|
||||
@@ -191,6 +318,13 @@ class SimStudioError(Exception):
|
||||
self.status = status
|
||||
```
|
||||
|
||||
**Common error codes:**
|
||||
- `UNAUTHORIZED`: Invalid API key
|
||||
- `TIMEOUT`: Request timed out
|
||||
- `RATE_LIMIT_EXCEEDED`: Rate limit exceeded
|
||||
- `USAGE_LIMIT_EXCEEDED`: Usage limit exceeded
|
||||
- `EXECUTION_ERROR`: Workflow execution failed
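As a minimal, illustrative sketch, these codes can be branched on when catching `SimStudioError`:

```python
import os

from simstudio import SimStudioClient, SimStudioError

client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))

try:
    result = client.execute_workflow("workflow-id")
    print("Result:", result)
except SimStudioError as error:
    # Branch on the error code to decide how to recover
    if error.code == "RATE_LIMIT_EXCEEDED":
        print("Rate limited - consider execute_with_retry()")
    elif error.code == "USAGE_LIMIT_EXCEEDED":
        print("Usage limit reached - check get_usage_limits()")
    elif error.code == "UNAUTHORIZED":
        print("Invalid API key")
    else:
        print(f"Workflow error ({error.code}): {error}")
```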
|
||||
|
||||
## Examples
|
||||
|
||||
### Basic Workflow Execution
|
||||
@@ -214,7 +348,7 @@ class SimStudioError(Exception):
|
||||
import os
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def run_workflow():
|
||||
try:
|
||||
@@ -252,7 +386,7 @@ Handle different types of errors that may occur during workflow execution:
|
||||
from simstudio import SimStudioClient, SimStudioError
|
||||
import os
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_with_error_handling():
|
||||
try:
|
||||
@@ -284,7 +418,7 @@ from simstudio import SimStudioClient
|
||||
import os
|
||||
|
||||
# Using context manager to automatically close the session
|
||||
with SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY")) as client:
|
||||
with SimStudioClient(api_key=os.getenv("SIM_API_KEY")) as client:
|
||||
result = client.execute_workflow("workflow-id")
|
||||
print("Result:", result)
|
||||
# Session is automatically closed here
|
||||
@@ -298,7 +432,7 @@ Execute multiple workflows efficiently:
|
||||
from simstudio import SimStudioClient
|
||||
import os
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_workflows_batch(workflow_data_pairs):
|
||||
"""Execute multiple workflows with different input data."""
|
||||
@@ -339,6 +473,230 @@ for result in results:
|
||||
print(f"Workflow {result['workflow_id']}: {'Success' if result['success'] else 'Failed'}")
|
||||
```
|
||||
|
||||
### Async Workflow Execution
|
||||
|
||||
Execute workflows asynchronously for long-running tasks:
|
||||
|
||||
```python
|
||||
import os
|
||||
import time
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_async():
|
||||
try:
|
||||
# Start async execution
|
||||
result = client.execute_workflow(
|
||||
"workflow-id",
|
||||
input_data={"data": "large dataset"},
|
||||
async_execution=True # Execute asynchronously
|
||||
)
|
||||
|
||||
# Check if result is an async execution
|
||||
if hasattr(result, 'task_id'):
|
||||
print(f"Task ID: {result.task_id}")
|
||||
print(f"Status endpoint: {result.links['status']}")
|
||||
|
||||
# Poll for completion
|
||||
status = client.get_job_status(result.task_id)
|
||||
|
||||
while status["status"] in ["queued", "processing"]:
|
||||
print(f"Current status: {status['status']}")
|
||||
time.sleep(2) # Wait 2 seconds
|
||||
status = client.get_job_status(result.task_id)
|
||||
|
||||
if status["status"] == "completed":
|
||||
print("Workflow completed!")
|
||||
print(f"Output: {status['output']}")
|
||||
print(f"Duration: {status['metadata']['duration']}")
|
||||
else:
|
||||
print(f"Workflow failed: {status['error']}")
|
||||
|
||||
except Exception as error:
|
||||
print(f"Error: {error}")
|
||||
|
||||
execute_async()
|
||||
```
|
||||
|
||||
### Rate Limiting and Retry
|
||||
|
||||
Handle rate limits automatically with exponential backoff:
|
||||
|
||||
```python
|
||||
import os
|
||||
from simstudio import SimStudioClient, SimStudioError
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_with_retry_handling():
|
||||
try:
|
||||
# Automatically retries on rate limit
|
||||
result = client.execute_with_retry(
|
||||
"workflow-id",
|
||||
input_data={"message": "Process this"},
|
||||
max_retries=5,
|
||||
initial_delay=1.0,
|
||||
max_delay=60.0,
|
||||
backoff_multiplier=2.0
|
||||
)
|
||||
|
||||
print(f"Success: {result}")
|
||||
except SimStudioError as error:
|
||||
if error.code == "RATE_LIMIT_EXCEEDED":
|
||||
print("Rate limit exceeded after all retries")
|
||||
|
||||
# Check rate limit info
|
||||
rate_limit_info = client.get_rate_limit_info()
|
||||
if rate_limit_info:
|
||||
from datetime import datetime
|
||||
reset_time = datetime.fromtimestamp(rate_limit_info.reset)
|
||||
print(f"Rate limit resets at: {reset_time}")
|
||||
|
||||
execute_with_retry_handling()
|
||||
```
|
||||
|
||||
### Usage Monitoring
|
||||
|
||||
Monitor your account usage and limits:
|
||||
|
||||
```python
|
||||
import os
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def check_usage():
|
||||
try:
|
||||
limits = client.get_usage_limits()
|
||||
|
||||
print("=== Rate Limits ===")
|
||||
print("Sync requests:")
|
||||
print(f" Limit: {limits.rate_limit['sync']['limit']}")
|
||||
print(f" Remaining: {limits.rate_limit['sync']['remaining']}")
|
||||
print(f" Resets at: {limits.rate_limit['sync']['resetAt']}")
|
||||
print(f" Is limited: {limits.rate_limit['sync']['isLimited']}")
|
||||
|
||||
print("\nAsync requests:")
|
||||
print(f" Limit: {limits.rate_limit['async']['limit']}")
|
||||
print(f" Remaining: {limits.rate_limit['async']['remaining']}")
|
||||
print(f" Resets at: {limits.rate_limit['async']['resetAt']}")
|
||||
print(f" Is limited: {limits.rate_limit['async']['isLimited']}")
|
||||
|
||||
print("\n=== Usage ===")
|
||||
print(f"Current period cost: ${limits.usage['currentPeriodCost']:.2f}")
|
||||
print(f"Limit: ${limits.usage['limit']:.2f}")
|
||||
print(f"Plan: {limits.usage['plan']}")
|
||||
|
||||
percent_used = (limits.usage['currentPeriodCost'] / limits.usage['limit']) * 100
|
||||
print(f"Usage: {percent_used:.1f}%")
|
||||
|
||||
if percent_used > 80:
|
||||
print("⚠️ Warning: You are approaching your usage limit!")
|
||||
|
||||
except Exception as error:
|
||||
print(f"Error checking usage: {error}")
|
||||
|
||||
check_usage()
|
||||
```
|
||||
|
||||
### Streaming Workflow Execution
|
||||
|
||||
Execute workflows with real-time streaming responses:
|
||||
|
||||
```python
|
||||
from simstudio import SimStudioClient
|
||||
import os
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_with_streaming():
|
||||
"""Execute workflow with streaming enabled."""
|
||||
try:
|
||||
# Enable streaming for specific block outputs
|
||||
result = client.execute_workflow(
|
||||
"workflow-id",
|
||||
input_data={"message": "Count to five"},
|
||||
stream=True,
|
||||
selected_outputs=["agent1.content"] # Use blockName.attribute format
|
||||
)
|
||||
|
||||
print("Workflow result:", result)
|
||||
except Exception as error:
|
||||
print("Error:", error)
|
||||
|
||||
execute_with_streaming()
|
||||
```
|
||||
|
||||
The streaming response follows the Server-Sent Events (SSE) format:
|
||||
|
||||
```
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":"One"}
|
||||
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", two"}
|
||||
|
||||
data: {"event":"done","success":true,"output":{},"metadata":{"duration":610}}
|
||||
|
||||
data: [DONE]
|
||||
```
|
||||
|
||||
**Flask Streaming Example:**
|
||||
|
||||
```python
|
||||
from flask import Flask, Response, stream_with_context
|
||||
import requests
|
||||
import json
|
||||
import os
|
||||
|
||||
app = Flask(__name__)
|
||||
|
||||
@app.route('/stream-workflow')
|
||||
def stream_workflow():
|
||||
"""Stream workflow execution to the client."""
|
||||
|
||||
def generate():
|
||||
response = requests.post(
|
||||
'https://sim.ai/api/workflows/WORKFLOW_ID/execute',
|
||||
headers={
|
||||
'Content-Type': 'application/json',
|
||||
'X-API-Key': os.getenv('SIM_API_KEY')
|
||||
},
|
||||
json={
|
||||
'message': 'Generate a story',
|
||||
'stream': True,
|
||||
'selectedOutputs': ['agent1.content']
|
||||
},
|
||||
stream=True
|
||||
)
|
||||
|
||||
for line in response.iter_lines():
|
||||
if line:
|
||||
decoded_line = line.decode('utf-8')
|
||||
if decoded_line.startswith('data: '):
|
||||
data = decoded_line[6:] # Remove 'data: ' prefix
|
||||
|
||||
if data == '[DONE]':
|
||||
break
|
||||
|
||||
try:
|
||||
parsed = json.loads(data)
|
||||
if 'chunk' in parsed:
|
||||
yield f"data: {json.dumps(parsed)}\n\n"
|
||||
elif parsed.get('event') == 'done':
|
||||
yield f"data: {json.dumps(parsed)}\n\n"
|
||||
print("Execution complete:", parsed.get('metadata'))
|
||||
except json.JSONDecodeError:
|
||||
pass
|
||||
|
||||
return Response(
|
||||
stream_with_context(generate()),
|
||||
mimetype='text/event-stream'
|
||||
)
|
||||
|
||||
if __name__ == '__main__':
|
||||
app.run(debug=True)
|
||||
```
|
||||
|
||||
### Environment Configuration
|
||||
|
||||
Configure the client using environment variables:
|
||||
@@ -351,8 +709,8 @@ Configure the client using environment variables:
|
||||
|
||||
# Development configuration
|
||||
client = SimStudioClient(
|
||||
api_key=os.getenv("SIMSTUDIO_API_KEY"),
|
||||
base_url=os.getenv("SIMSTUDIO_BASE_URL", "https://sim.ai")
|
||||
api_key=os.getenv("SIM_API_KEY")
|
||||
base_url=os.getenv("SIM_BASE_URL", "https://sim.ai")
|
||||
)
|
||||
```
|
||||
</Tab>
|
||||
@@ -362,13 +720,13 @@ Configure the client using environment variables:
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
# Production configuration with error handling
|
||||
api_key = os.getenv("SIMSTUDIO_API_KEY")
|
||||
api_key = os.getenv("SIM_API_KEY")
|
||||
if not api_key:
|
||||
raise ValueError("SIMSTUDIO_API_KEY environment variable is required")
|
||||
raise ValueError("SIM_API_KEY environment variable is required")
|
||||
|
||||
client = SimStudioClient(
|
||||
api_key=api_key,
|
||||
base_url=os.getenv("SIMSTUDIO_BASE_URL", "https://sim.ai")
|
||||
base_url=os.getenv("SIM_BASE_URL", "https://sim.ai")
|
||||
)
|
||||
```
|
||||
</Tab>
|
||||
|
||||
@@ -7,10 +7,10 @@ import { Card, Cards } from 'fumadocs-ui/components/card'
|
||||
import { Step, Steps } from 'fumadocs-ui/components/steps'
|
||||
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
|
||||
|
||||
The official TypeScript/JavaScript SDK for Sim provides full type safety and supports both Node.js and browser environments, allowing you to execute workflows programmatically from your Node.js applications, web applications, and other JavaScript environments. All workflow executions are currently synchronous.
|
||||
The official TypeScript/JavaScript SDK for Sim provides full type safety and supports both Node.js and browser environments, allowing you to execute workflows programmatically from your Node.js applications, web applications, and other JavaScript environments.
|
||||
|
||||
<Callout type="info">
|
||||
The TypeScript SDK provides full type safety and supports both Node.js and browser environments. All workflow executions are currently synchronous.
|
||||
The TypeScript SDK provides full type safety, async execution support, automatic rate limiting with exponential backoff, and usage tracking.
|
||||
</Callout>
|
||||
|
||||
## Installation
|
||||
@@ -89,8 +89,13 @@ const result = await client.executeWorkflow('workflow-id', {
|
||||
- `options` (ExecutionOptions, optional):
|
||||
- `input` (any): Input data to pass to the workflow
|
||||
- `timeout` (number): Timeout in milliseconds (default: 30000)
|
||||
- `stream` (boolean): Enable streaming responses (default: false)
|
||||
- `selectedOutputs` (string[]): Block outputs to stream in `blockName.attribute` format (e.g., `["agent1.content"]`)
|
||||
- `async` (boolean): Execute asynchronously (default: false)
|
||||
|
||||
**Returns:** `Promise<WorkflowExecutionResult>`
|
||||
**Returns:** `Promise<WorkflowExecutionResult | AsyncExecutionResult>`
|
||||
|
||||
When `async: true`, returns immediately with a task ID for polling. Otherwise, waits for completion.
|
||||
|
||||
##### getWorkflowStatus()
|
||||
|
||||
@@ -122,28 +127,116 @@ if (isReady) {
|
||||
|
||||
**Returns:** `Promise<boolean>`
|
||||
|
||||
##### executeWorkflowSync()
|
||||
##### getJobStatus()
|
||||
|
||||
<Callout type="info">
|
||||
Currently, this method is identical to `executeWorkflow()` since all executions are synchronous. This method is provided for future compatibility when asynchronous execution is added.
|
||||
</Callout>
|
||||
|
||||
Execute a workflow (currently synchronous, same as `executeWorkflow()`).
|
||||
Get the status of an async job execution.
|
||||
|
||||
```typescript
|
||||
const result = await client.executeWorkflowSync('workflow-id', {
|
||||
input: { data: 'some input' },
|
||||
timeout: 60000
|
||||
const status = await client.getJobStatus('task-id-from-async-execution');
|
||||
console.log('Status:', status.status); // 'queued', 'processing', 'completed', 'failed'
|
||||
if (status.status === 'completed') {
|
||||
console.log('Output:', status.output);
|
||||
}
|
||||
```
|
||||
|
||||
**Parameters:**
|
||||
- `taskId` (string): The task ID returned from async execution
|
||||
|
||||
**Returns:** `Promise<JobStatus>`
|
||||
|
||||
**Response fields:**
|
||||
- `success` (boolean): Whether the request was successful
|
||||
- `taskId` (string): The task ID
|
||||
- `status` (string): One of `'queued'`, `'processing'`, `'completed'`, `'failed'`, `'cancelled'`
|
||||
- `metadata` (object): Contains `startedAt`, `completedAt`, and `duration`
|
||||
- `output` (any, optional): The workflow output (when completed)
|
||||
- `error` (any, optional): Error details (when failed)
|
||||
- `estimatedDuration` (number, optional): Estimated duration in milliseconds (when processing/queued)
|
||||
|
||||
##### executeWithRetry()
|
||||
|
||||
Execute a workflow with automatic retry on rate limit errors using exponential backoff.
|
||||
|
||||
```typescript
|
||||
const result = await client.executeWithRetry('workflow-id', {
|
||||
input: { message: 'Hello' },
|
||||
timeout: 30000
|
||||
}, {
|
||||
maxRetries: 3, // Maximum number of retries
|
||||
initialDelay: 1000, // Initial delay in ms (1 second)
|
||||
maxDelay: 30000, // Maximum delay in ms (30 seconds)
|
||||
backoffMultiplier: 2 // Exponential backoff multiplier
|
||||
});
|
||||
```
|
||||
|
||||
**Parameters:**
|
||||
- `workflowId` (string): The ID of the workflow to execute
|
||||
- `options` (ExecutionOptions, optional):
|
||||
- `input` (any): Input data to pass to the workflow
|
||||
- `timeout` (number): Timeout for the initial request in milliseconds
|
||||
- `options` (ExecutionOptions, optional): Same as `executeWorkflow()`
|
||||
- `retryOptions` (RetryOptions, optional):
|
||||
- `maxRetries` (number): Maximum number of retries (default: 3)
|
||||
- `initialDelay` (number): Initial delay in ms (default: 1000)
|
||||
- `maxDelay` (number): Maximum delay in ms (default: 30000)
|
||||
- `backoffMultiplier` (number): Backoff multiplier (default: 2)
|
||||
|
||||
**Returns:** `Promise<WorkflowExecutionResult>`
|
||||
**Returns:** `Promise<WorkflowExecutionResult | AsyncExecutionResult>`
|
||||
|
||||
The retry logic uses exponential backoff (1s → 2s → 4s → 8s...) with ±25% jitter to prevent a thundering herd of simultaneous retries. If the API provides a `retry-after` header, it will be used instead.
|
||||
|
||||
##### getRateLimitInfo()
|
||||
|
||||
Get the current rate limit information from the last API response.
|
||||
|
||||
```typescript
|
||||
const rateLimitInfo = client.getRateLimitInfo();
|
||||
if (rateLimitInfo) {
|
||||
console.log('Limit:', rateLimitInfo.limit);
|
||||
console.log('Remaining:', rateLimitInfo.remaining);
|
||||
console.log('Reset:', new Date(rateLimitInfo.reset * 1000));
|
||||
}
|
||||
```
|
||||
|
||||
**Returns:** `RateLimitInfo | null`
|
||||
|
||||
##### getUsageLimits()
|
||||
|
||||
Get current usage limits and quota information for your account.
|
||||
|
||||
```typescript
|
||||
const limits = await client.getUsageLimits();
|
||||
console.log('Sync requests remaining:', limits.rateLimit.sync.remaining);
|
||||
console.log('Async requests remaining:', limits.rateLimit.async.remaining);
|
||||
console.log('Current period cost:', limits.usage.currentPeriodCost);
|
||||
console.log('Plan:', limits.usage.plan);
|
||||
```
|
||||
|
||||
**Returns:** `Promise<UsageLimits>`
|
||||
|
||||
**Response structure:**
|
||||
```typescript
|
||||
{
|
||||
success: boolean
|
||||
rateLimit: {
|
||||
sync: {
|
||||
isLimited: boolean
|
||||
limit: number
|
||||
remaining: number
|
||||
resetAt: string
|
||||
}
|
||||
async: {
|
||||
isLimited: boolean
|
||||
limit: number
|
||||
remaining: number
|
||||
resetAt: string
|
||||
}
|
||||
authType: string // 'api' or 'manual'
|
||||
}
|
||||
usage: {
|
||||
currentPeriodCost: number
|
||||
limit: number
|
||||
plan: string // e.g., 'free', 'pro'
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
##### setApiKey()
|
||||
|
||||
@@ -181,6 +274,20 @@ interface WorkflowExecutionResult {
|
||||
}
|
||||
```
|
||||
|
||||
### AsyncExecutionResult
|
||||
|
||||
```typescript
|
||||
interface AsyncExecutionResult {
|
||||
success: boolean;
|
||||
taskId: string;
|
||||
status: 'queued';
|
||||
createdAt: string;
|
||||
links: {
|
||||
status: string; // e.g., "/api/jobs/{taskId}"
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
### WorkflowStatus
|
||||
|
||||
```typescript
|
||||
@@ -192,6 +299,45 @@ interface WorkflowStatus {
|
||||
}
|
||||
```
|
||||
|
||||
### RateLimitInfo
|
||||
|
||||
```typescript
|
||||
interface RateLimitInfo {
|
||||
limit: number;
|
||||
remaining: number;
|
||||
reset: number;
|
||||
retryAfter?: number;
|
||||
}
|
||||
```
|
||||
|
||||
### UsageLimits
|
||||
|
||||
```typescript
|
||||
interface UsageLimits {
|
||||
success: boolean;
|
||||
rateLimit: {
|
||||
sync: {
|
||||
isLimited: boolean;
|
||||
limit: number;
|
||||
remaining: number;
|
||||
resetAt: string;
|
||||
};
|
||||
async: {
|
||||
isLimited: boolean;
|
||||
limit: number;
|
||||
remaining: number;
|
||||
resetAt: string;
|
||||
};
|
||||
authType: string;
|
||||
};
|
||||
usage: {
|
||||
currentPeriodCost: number;
|
||||
limit: number;
|
||||
plan: string;
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
### SimStudioError
|
||||
|
||||
```typescript
|
||||
@@ -201,6 +347,13 @@ class SimStudioError extends Error {
|
||||
}
|
||||
```
|
||||
|
||||
**Common error codes:**
|
||||
- `UNAUTHORIZED`: Invalid API key
|
||||
- `TIMEOUT`: Request timed out
|
||||
- `RATE_LIMIT_EXCEEDED`: Rate limit exceeded
|
||||
- `USAGE_LIMIT_EXCEEDED`: Usage limit exceeded
|
||||
- `EXECUTION_ERROR`: Workflow execution failed
|
||||
|
||||
## Examples
|
||||
|
||||
### Basic Workflow Execution
|
||||
@@ -224,7 +377,7 @@ class SimStudioError extends Error {
|
||||
import { SimStudioClient } from 'simstudio-ts-sdk';
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.SIMSTUDIO_API_KEY!
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
async function runWorkflow() {
|
||||
@@ -265,7 +418,7 @@ Handle different types of errors that may occur during workflow execution:
|
||||
import { SimStudioClient, SimStudioError } from 'simstudio-ts-sdk';
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.SIMSTUDIO_API_KEY!
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
async function executeWithErrorHandling() {
|
||||
@@ -308,14 +461,14 @@ Configure the client using environment variables:
|
||||
import { SimStudioClient } from 'simstudio-ts-sdk';
|
||||
|
||||
// Development configuration
|
||||
const apiKey = process.env.SIMSTUDIO_API_KEY;
|
||||
const apiKey = process.env.SIM_API_KEY;
|
||||
if (!apiKey) {
|
||||
throw new Error('SIMSTUDIO_API_KEY environment variable is required');
|
||||
throw new Error('SIM_API_KEY environment variable is required');
|
||||
}
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey,
|
||||
baseUrl: process.env.SIMSTUDIO_BASE_URL // optional
|
||||
baseUrl: process.env.SIM_BASE_URL // optional
|
||||
});
|
||||
```
|
||||
</Tab>
|
||||
@@ -324,14 +477,14 @@ Configure the client using environment variables:
|
||||
import { SimStudioClient } from 'simstudio-ts-sdk';
|
||||
|
||||
// Production configuration with validation
|
||||
const apiKey = process.env.SIMSTUDIO_API_KEY;
|
||||
const apiKey = process.env.SIM_API_KEY;
|
||||
if (!apiKey) {
|
||||
throw new Error('SIMSTUDIO_API_KEY environment variable is required');
|
||||
throw new Error('SIM_API_KEY environment variable is required');
|
||||
}
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey,
|
||||
baseUrl: process.env.SIMSTUDIO_BASE_URL || 'https://sim.ai'
|
||||
baseUrl: process.env.SIM_BASE_URL || 'https://sim.ai'
|
||||
});
|
||||
```
|
||||
</Tab>
|
||||
@@ -347,7 +500,7 @@ import { SimStudioClient } from 'simstudio-ts-sdk';
|
||||
|
||||
const app = express();
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.SIMSTUDIO_API_KEY!
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
app.use(express.json());
|
||||
@@ -389,7 +542,7 @@ import { NextApiRequest, NextApiResponse } from 'next';
|
||||
import { SimStudioClient } from 'simstudio-ts-sdk';
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.SIMSTUDIO_API_KEY!
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
export default async function handler(
|
||||
@@ -440,14 +593,91 @@ async function executeClientSideWorkflow() {
|
||||
});
|
||||
|
||||
console.log('Workflow result:', result);
|
||||
|
||||
|
||||
// Update UI with result
|
||||
document.getElementById('result')!.textContent =
|
||||
document.getElementById('result')!.textContent =
|
||||
JSON.stringify(result.output, null, 2);
|
||||
} catch (error) {
|
||||
console.error('Error:', error);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### File Upload
|
||||
|
||||
File objects are automatically detected and converted to base64 format. Include them in your input under the field name matching your workflow's API trigger input format.
|
||||
|
||||
The SDK converts File objects to this format:
|
||||
```typescript
|
||||
{
|
||||
type: 'file',
|
||||
data: 'data:mime/type;base64,base64data',
|
||||
name: 'filename',
|
||||
mime: 'mime/type'
|
||||
}
|
||||
```
|
||||
|
||||
Alternatively, you can manually provide files using the URL format:
|
||||
```typescript
|
||||
{
|
||||
type: 'url',
|
||||
data: 'https://example.com/file.pdf',
|
||||
name: 'file.pdf',
|
||||
mime: 'application/pdf'
|
||||
}
|
||||
```
|
||||
|
||||
<Tabs items={['Browser', 'Node.js']}>
|
||||
<Tab value="Browser">
|
||||
```typescript
|
||||
import { SimStudioClient } from 'simstudio-ts-sdk';
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.NEXT_PUBLIC_SIM_API_KEY!
|
||||
});
|
||||
|
||||
// From file input
|
||||
async function handleFileUpload(event: Event) {
|
||||
const input = event.target as HTMLInputElement;
|
||||
const files = Array.from(input.files || []);
|
||||
|
||||
// Include files under the field name from your API trigger's input format
|
||||
const result = await client.executeWorkflow('workflow-id', {
|
||||
input: {
|
||||
documents: files, // Must match your workflow's "files" field name
|
||||
instructions: 'Analyze these documents'
|
||||
}
|
||||
});
|
||||
|
||||
console.log('Result:', result);
|
||||
}
|
||||
```
|
||||
</Tab>
|
||||
<Tab value="Node.js">
|
||||
```typescript
|
||||
import { SimStudioClient } from 'simstudio-ts-sdk';
|
||||
import fs from 'fs';
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
// Read file and create File object
|
||||
const fileBuffer = fs.readFileSync('./document.pdf');
|
||||
const file = new File([fileBuffer], 'document.pdf', {
|
||||
type: 'application/pdf'
|
||||
});
|
||||
|
||||
// Include files under the field name from your API trigger's input format
|
||||
const result = await client.executeWorkflow('workflow-id', {
|
||||
input: {
|
||||
documents: [file], // Must match your workflow's "files" field name
|
||||
query: 'Summarize this document'
|
||||
}
|
||||
});
|
||||
```
|
||||
</Tab>
|
||||
</Tabs>
|
||||
|
||||
// Attach to button click
|
||||
document.getElementById('executeBtn')?.addEventListener('click', executeClientSideWorkflow);
|
||||
@@ -466,7 +696,7 @@ import { useState, useCallback } from 'react';
|
||||
import { SimStudioClient, WorkflowExecutionResult } from 'simstudio-ts-sdk';
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.NEXT_PUBLIC_SIMSTUDIO_API_KEY!
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
interface UseWorkflowResult {
|
||||
@@ -522,7 +752,7 @@ function WorkflowComponent() {
|
||||
<button onClick={handleExecute} disabled={loading}>
|
||||
{loading ? 'Executing...' : 'Execute Workflow'}
|
||||
</button>
|
||||
|
||||
|
||||
{error && <div>Error: {error.message}</div>}
|
||||
{result && (
|
||||
<div>
|
||||
@@ -535,6 +765,251 @@ function WorkflowComponent() {
|
||||
}
|
||||
```
|
||||
|
||||
### Async Workflow Execution
|
||||
|
||||
Execute workflows asynchronously for long-running tasks:
|
||||
|
||||
```typescript
|
||||
import { SimStudioClient, AsyncExecutionResult } from 'simstudio-ts-sdk';
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
async function executeAsync() {
|
||||
try {
|
||||
// Start async execution
|
||||
const result = await client.executeWorkflow('workflow-id', {
|
||||
input: { data: 'large dataset' },
|
||||
async: true // Execute asynchronously
|
||||
});
|
||||
|
||||
// Check if result is an async execution
|
||||
if ('taskId' in result) {
|
||||
console.log('Task ID:', result.taskId);
|
||||
console.log('Status endpoint:', result.links.status);
|
||||
|
||||
// Poll for completion
|
||||
let status = await client.getJobStatus(result.taskId);
|
||||
|
||||
while (status.status === 'queued' || status.status === 'processing') {
|
||||
console.log('Current status:', status.status);
|
||||
await new Promise(resolve => setTimeout(resolve, 2000)); // Wait 2 seconds
|
||||
status = await client.getJobStatus(result.taskId);
|
||||
}
|
||||
|
||||
if (status.status === 'completed') {
|
||||
console.log('Workflow completed!');
|
||||
console.log('Output:', status.output);
|
||||
console.log('Duration:', status.metadata.duration);
|
||||
} else {
|
||||
console.error('Workflow failed:', status.error);
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
console.error('Error:', error);
|
||||
}
|
||||
}
|
||||
|
||||
executeAsync();
|
||||
```
|
||||
|
||||
### Rate Limiting and Retry
|
||||
|
||||
Handle rate limits automatically with exponential backoff:
|
||||
|
||||
```typescript
|
||||
import { SimStudioClient, SimStudioError } from 'simstudio-ts-sdk';
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
async function executeWithRetryHandling() {
|
||||
try {
|
||||
// Automatically retries on rate limit
|
||||
const result = await client.executeWithRetry('workflow-id', {
|
||||
input: { message: 'Process this' }
|
||||
}, {
|
||||
maxRetries: 5,
|
||||
initialDelay: 1000,
|
||||
maxDelay: 60000,
|
||||
backoffMultiplier: 2
|
||||
});
|
||||
|
||||
console.log('Success:', result);
|
||||
} catch (error) {
|
||||
if (error instanceof SimStudioError && error.code === 'RATE_LIMIT_EXCEEDED') {
|
||||
console.error('Rate limit exceeded after all retries');
|
||||
|
||||
// Check rate limit info
|
||||
const rateLimitInfo = client.getRateLimitInfo();
|
||||
if (rateLimitInfo) {
|
||||
console.log('Rate limit resets at:', new Date(rateLimitInfo.reset * 1000));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Usage Monitoring
|
||||
|
||||
Monitor your account usage and limits:
|
||||
|
||||
```typescript
|
||||
import { SimStudioClient } from 'simstudio-ts-sdk';
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
async function checkUsage() {
|
||||
try {
|
||||
const limits = await client.getUsageLimits();
|
||||
|
||||
console.log('=== Rate Limits ===');
|
||||
console.log('Sync requests:');
|
||||
console.log(' Limit:', limits.rateLimit.sync.limit);
|
||||
console.log(' Remaining:', limits.rateLimit.sync.remaining);
|
||||
console.log(' Resets at:', limits.rateLimit.sync.resetAt);
|
||||
console.log(' Is limited:', limits.rateLimit.sync.isLimited);
|
||||
|
||||
console.log('\nAsync requests:');
|
||||
console.log(' Limit:', limits.rateLimit.async.limit);
|
||||
console.log(' Remaining:', limits.rateLimit.async.remaining);
|
||||
console.log(' Resets at:', limits.rateLimit.async.resetAt);
|
||||
console.log(' Is limited:', limits.rateLimit.async.isLimited);
|
||||
|
||||
console.log('\n=== Usage ===');
|
||||
console.log('Current period cost: $' + limits.usage.currentPeriodCost.toFixed(2));
|
||||
console.log('Limit: $' + limits.usage.limit.toFixed(2));
|
||||
console.log('Plan:', limits.usage.plan);
|
||||
|
||||
const percentUsed = (limits.usage.currentPeriodCost / limits.usage.limit) * 100;
|
||||
console.log('Usage: ' + percentUsed.toFixed(1) + '%');
|
||||
|
||||
if (percentUsed > 80) {
|
||||
console.warn('⚠️ Warning: You are approaching your usage limit!');
|
||||
}
|
||||
} catch (error) {
|
||||
console.error('Error checking usage:', error);
|
||||
}
|
||||
}
|
||||
|
||||
checkUsage();
|
||||
```
|
||||
|
||||
### Streaming Workflow Execution
|
||||
|
||||
Execute workflows with real-time streaming responses:
|
||||
|
||||
```typescript
|
||||
import { SimStudioClient } from 'simstudio-ts-sdk';
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
async function executeWithStreaming() {
|
||||
try {
|
||||
// Enable streaming for specific block outputs
|
||||
const result = await client.executeWorkflow('workflow-id', {
|
||||
input: { message: 'Count to five' },
|
||||
stream: true,
|
||||
selectedOutputs: ['agent1.content'] // Use blockName.attribute format
|
||||
});
|
||||
|
||||
console.log('Workflow result:', result);
|
||||
} catch (error) {
|
||||
console.error('Error:', error);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
The streaming response follows the Server-Sent Events (SSE) format:
|
||||
|
||||
```
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":"One"}
|
||||
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", two"}
|
||||
|
||||
data: {"event":"done","success":true,"output":{},"metadata":{"duration":610}}
|
||||
|
||||
data: [DONE]
|
||||
```
|
||||
|
||||
**React Streaming Example:**
|
||||
|
||||
```typescript
|
||||
import { useState, useEffect } from 'react';
|
||||
|
||||
function StreamingWorkflow() {
|
||||
const [output, setOutput] = useState('');
|
||||
const [loading, setLoading] = useState(false);
|
||||
|
||||
const executeStreaming = async () => {
|
||||
setLoading(true);
|
||||
setOutput('');
|
||||
|
||||
// IMPORTANT: Make this API call from your backend server, not the browser
|
||||
// Never expose your API key in client-side code
|
||||
const response = await fetch('https://sim.ai/api/workflows/WORKFLOW_ID/execute', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'X-API-Key': process.env.SIM_API_KEY! // Server-side environment variable only
|
||||
},
|
||||
body: JSON.stringify({
|
||||
message: 'Generate a story',
|
||||
stream: true,
|
||||
selectedOutputs: ['agent1.content']
|
||||
})
|
||||
});
|
||||
|
||||
const reader = response.body?.getReader();
|
||||
const decoder = new TextDecoder();
|
||||
|
||||
while (reader) {
|
||||
const { done, value } = await reader.read();
|
||||
if (done) break;
|
||||
|
||||
const chunk = decoder.decode(value);
|
||||
const lines = chunk.split('\n\n');
|
||||
|
||||
for (const line of lines) {
|
||||
if (line.startsWith('data: ')) {
|
||||
const data = line.slice(6);
|
||||
if (data === '[DONE]') {
|
||||
setLoading(false);
|
||||
break;
|
||||
}
|
||||
|
||||
try {
|
||||
const parsed = JSON.parse(data);
|
||||
if (parsed.chunk) {
|
||||
setOutput(prev => prev + parsed.chunk);
|
||||
} else if (parsed.event === 'done') {
|
||||
console.log('Execution complete:', parsed.metadata);
|
||||
}
|
||||
} catch (e) {
|
||||
// Skip invalid JSON
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
return (
|
||||
<div>
|
||||
<button onClick={executeStreaming} disabled={loading}>
|
||||
{loading ? 'Generating...' : 'Start Streaming'}
|
||||
</button>
|
||||
<div style={{ whiteSpace: 'pre-wrap' }}>{output}</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
```
|
||||
|
||||
## Getting Your API Key
|
||||
|
||||
<Steps>
|
||||
@@ -578,7 +1053,7 @@ import {
|
||||
|
||||
// Type-safe client initialization
|
||||
const client: SimStudioClient = new SimStudioClient({
|
||||
apiKey: process.env.SIMSTUDIO_API_KEY!
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
// Type-safe workflow execution
|
||||
@@ -594,4 +1069,4 @@ const status: WorkflowStatus = await client.getWorkflowStatus('workflow-id');
|
||||
|
||||
## License
|
||||
|
||||
Apache-2.0
|
||||
Apache-2.0
|
||||
|
||||
@@ -22,9 +22,17 @@ The API trigger exposes your workflow as a secure HTTP endpoint. Send JSON data
|
||||
/>
|
||||
</div>
|
||||
|
||||
Add an **Input Format** field for each parameter. Runtime output keys mirror the schema and are also available under `<api.input>`.
|
||||
Add an **Input Format** field for each parameter. Supported types:
|
||||
|
||||
Manual runs in the editor use the `value` column so you can test without sending a request. During execution the resolver populates both `<api.userId>` and `<api.input.userId>`.
|
||||
- **string** - Text values
|
||||
- **number** - Numeric values
|
||||
- **boolean** - True/false values
|
||||
- **json** - JSON objects
|
||||
- **files** - File uploads (access via `<api.fieldName[0].url>`, `<api.fieldName[0].name>`, etc.)
|
||||
|
||||
Runtime output keys mirror the schema and are available under `<api.input>`.
|
||||
|
||||
Manual runs in the editor use the `value` column so you can test without sending a request. During execution the resolver populates both `<api.fieldName>` and `<api.input.fieldName>`.
|
||||
|
||||
## Request Example
|
||||
|
||||
@@ -38,6 +46,84 @@ curl -X POST \
|
||||
|
||||
Successful responses return the serialized execution result from the Executor. Errors surface validation, auth, or workflow failures.
|
||||
|
||||
## Streaming Responses
|
||||
|
||||
Enable real-time streaming to receive workflow output as it's generated, character-by-character. This is useful for displaying AI responses progressively to users.
|
||||
|
||||
### Request Parameters
|
||||
|
||||
Add these parameters to enable streaming:
|
||||
|
||||
- `stream` - Set to `true` to enable Server-Sent Events (SSE) streaming
|
||||
- `selectedOutputs` - Array of block outputs to stream (e.g., `["agent1.content"]`)
|
||||
|
||||
### Block Output Format
|
||||
|
||||
Use the `blockName.attribute` format to specify which block outputs to stream:
|
||||
- Format: `"blockName.attribute"` (e.g., If you want to stream the content of the Agent 1 block, you would use `"agent1.content"`)
|
||||
- Block names are case-insensitive and spaces are ignored
|
||||
|
||||
### Example Request
|
||||
|
||||
```bash
|
||||
curl -X POST \
|
||||
https://sim.ai/api/workflows/WORKFLOW_ID/execute \
|
||||
-H 'Content-Type: application/json' \
|
||||
-H 'X-API-Key: YOUR_KEY' \
|
||||
-d '{
|
||||
"message": "Count to five",
|
||||
"stream": true,
|
||||
"selectedOutputs": ["agent1.content"]
|
||||
}'
|
||||
```
|
||||
|
||||
### Response Format
|
||||
|
||||
Streaming responses use Server-Sent Events (SSE) format:
|
||||
|
||||
```
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":"One"}
|
||||
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", two"}
|
||||
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", three"}
|
||||
|
||||
data: {"event":"done","success":true,"output":{},"metadata":{"duration":610}}
|
||||
|
||||
data: [DONE]
|
||||
```
|
||||
|
||||
Each event includes:
|
||||
- **Streaming chunks**: `{"blockId": "...", "chunk": "text"}` - Real-time text as it's generated
|
||||
- **Final event**: `{"event": "done", ...}` - Execution metadata and complete results
|
||||
- **Terminator**: `[DONE]` - Signals end of stream
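As a minimal sketch, assuming the Python `requests` library and the same endpoint and payload as the example request above, the stream can be consumed like this:

```python
import json
import os

import requests

response = requests.post(
    "https://sim.ai/api/workflows/WORKFLOW_ID/execute",
    headers={"Content-Type": "application/json", "X-API-Key": os.environ["SIM_API_KEY"]},
    json={"message": "Count to five", "stream": True, "selectedOutputs": ["agent1.content"]},
    stream=True,
)

for line in response.iter_lines():
    if not line:
        continue
    decoded = line.decode("utf-8")
    if not decoded.startswith("data: "):
        continue
    data = decoded[6:]  # strip the 'data: ' prefix
    if data == "[DONE]":
        break  # terminator
    event = json.loads(data)
    if "chunk" in event:
        print(event["chunk"], end="", flush=True)   # streaming text
    elif event.get("event") == "done":
        print("\nDone:", event.get("metadata"))     # final metadata
```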
|
||||
|
||||
### Multiple Block Streaming
|
||||
|
||||
When `selectedOutputs` includes multiple blocks, each chunk indicates which block produced it:
|
||||
|
||||
```bash
|
||||
curl -X POST \
|
||||
https://sim.ai/api/workflows/WORKFLOW_ID/execute \
|
||||
-H 'Content-Type: application/json' \
|
||||
-H 'X-API-Key: YOUR_KEY' \
|
||||
-d '{
|
||||
"message": "Process this request",
|
||||
"stream": true,
|
||||
"selectedOutputs": ["agent1.content", "agent2.content"]
|
||||
}'
|
||||
```
|
||||
|
||||
The `blockId` field in each chunk lets you route output to the correct UI element:
|
||||
|
||||
```
|
||||
data: {"blockId":"agent1-uuid","chunk":"Processing..."}
|
||||
|
||||
data: {"blockId":"agent2-uuid","chunk":"Analyzing..."}
|
||||
|
||||
data: {"blockId":"agent1-uuid","chunk":" complete"}
|
||||
```
|
||||
|
||||
## Output Reference
|
||||
|
||||
| Reference | Description |
|
||||
@@ -45,6 +131,53 @@ Successful responses return the serialized execution result from the Executor. E
|
||||
| `<api.field>` | Field defined in the Input Format |
|
||||
| `<api.input>` | Entire structured request body |
|
||||
|
||||
### File Upload Format
|
||||
|
||||
The API accepts files in two formats:
|
||||
|
||||
**1. Base64-encoded files** (recommended for SDKs):
|
||||
```json
|
||||
{
|
||||
"documents": [{
|
||||
"type": "file",
|
||||
"data": "data:application/pdf;base64,JVBERi0xLjQK...",
|
||||
"name": "document.pdf",
|
||||
"mime": "application/pdf"
|
||||
}]
|
||||
}
|
||||
```
|
||||
- Maximum file size: 20MB per file
|
||||
- Files are uploaded to cloud storage and converted to UserFile objects with all properties
|
||||
|
||||
**2. Direct URL references**:
|
||||
```json
|
||||
{
|
||||
"documents": [{
|
||||
"type": "url",
|
||||
"data": "https://example.com/document.pdf",
|
||||
"name": "document.pdf",
|
||||
"mime": "application/pdf"
|
||||
}]
|
||||
}
|
||||
```
|
||||
- File is not uploaded, URL is passed through directly
|
||||
- Useful for referencing existing files
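As an illustrative sketch, assuming the Python `requests` library, a local file can be encoded into the base64 format above for a direct API call; the `documents` key comes from the example above and must match a files-type field in your workflow's Input Format:

```python
import base64
import mimetypes
import os

import requests

path = "document.pdf"
mime = mimetypes.guess_type(path)[0] or "application/octet-stream"
with open(path, "rb") as f:
    encoded = base64.b64encode(f.read()).decode("ascii")

payload = {
    "documents": [{
        "type": "file",
        "data": f"data:{mime};base64,{encoded}",  # data URI with base64 body
        "name": os.path.basename(path),
        "mime": mime,
    }]
}

response = requests.post(
    "https://sim.ai/api/workflows/WORKFLOW_ID/execute",
    headers={"X-API-Key": os.environ["SIM_API_KEY"]},
    json=payload,
)
print(response.json())
```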
|
||||
|
||||
### File Properties
|
||||
|
||||
For files, access all properties:
|
||||
|
||||
| Property | Description | Type |
|
||||
|----------|-------------|------|
|
||||
| `<api.fieldName[0].url>` | Signed download URL | string |
|
||||
| `<api.fieldName[0].name>` | Original filename | string |
|
||||
| `<api.fieldName[0].size>` | File size in bytes | number |
|
||||
| `<api.fieldName[0].type>` | MIME type | string |
|
||||
| `<api.fieldName[0].uploadedAt>` | Upload timestamp (ISO 8601) | string |
|
||||
| `<api.fieldName[0].expiresAt>` | URL expiry timestamp (ISO 8601) | string |
|
||||
|
||||
For URL-referenced files, the same properties are available except `uploadedAt` and `expiresAt` since the file is not uploaded to our storage.
|
||||
|
||||
If no Input Format is defined, the executor exposes the raw JSON at `<api.input>` only.
|
||||
|
||||
<Callout type="warning">
|
||||
|
||||
@@ -24,13 +24,24 @@ The Chat trigger creates a conversational interface for your workflow. Deploy yo
|
||||
|
||||
The trigger writes three fields that downstream blocks can reference:
|
||||
|
||||
| Reference | Description |
|
||||
|-----------|-------------|
|
||||
| `<chat.input>` | Latest user message |
|
||||
| `<chat.conversationId>` | Conversation thread ID |
|
||||
| `<chat.files>` | Optional uploaded files |
|
||||
| Reference | Description | Type |
|
||||
|-----------|-------------|------|
|
||||
| `<chat.input>` | Latest user message | string |
|
||||
| `<chat.conversationId>` | Conversation thread ID | string |
|
||||
| `<chat.files>` | Optional uploaded files | files array |
|
||||
|
||||
Files include `name`, `mimeType`, and a signed download `url`.
|
||||
### File Properties
|
||||
|
||||
Access individual file properties using array indexing:
|
||||
|
||||
| Property | Description | Type |
|
||||
|----------|-------------|------|
|
||||
| `<chat.files[0].url>` | Signed download URL | string |
|
||||
| `<chat.files[0].name>` | Original filename | string |
|
||||
| `<chat.files[0].size>` | File size in bytes | number |
|
||||
| `<chat.files[0].type>` | MIME type | string |
|
||||
| `<chat.files[0].uploadedAt>` | Upload timestamp (ISO 8601) | string |
|
||||
| `<chat.files[0].expiresAt>` | URL expiry timestamp (ISO 8601) | string |
|
||||
|
||||
## Usage Notes
|
||||
|
||||
|
||||
@@ -10,7 +10,7 @@ import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
|
||||
The official Python SDK for Sim lets you execute workflows programmatically from your Python applications.

<Callout type="info">
The Python SDK supports Python 3.8+ and provides synchronous workflow execution. All workflow executions are currently synchronous.
The Python SDK supports Python 3.8+ with async execution support, automatic rate limiting with exponential backoff, and usage tracking.
</Callout>

## Installation
|
||||
@@ -74,12 +74,17 @@ result = client.execute_workflow(
|
||||
- `workflow_id` (str): The ID of the workflow to execute
- `input_data` (dict, optional): Input data to pass to the workflow
- `timeout` (float, optional): Timeout in seconds (default: 30.0)
- `stream` (bool, optional): Enable streaming responses (default: False)
- `selected_outputs` (list[str], optional): Block outputs to stream in `blockName.attribute` format (e.g., `["agent1.content"]`)
- `async_execution` (bool, optional): Execute asynchronously (default: False)

**Returns:** `WorkflowExecutionResult`
**Returns:** `WorkflowExecutionResult | AsyncExecutionResult`

When `async_execution=True`, returns immediately with a task ID for polling. Otherwise, waits for completion.
|
||||
|
||||
##### get_workflow_status()
|
||||
|
||||
Get the status of a workflow (deployment status, etc.).
|
||||
|
||||
```python
|
||||
status = client.get_workflow_status("workflow-id")
|
||||
@@ -93,7 +98,7 @@ print("Is deployed:", status.is_deployed)
|
||||
|
||||
##### validate_workflow()
|
||||
|
||||
Validate that a workflow is ready for execution.
|
||||
|
||||
```python
|
||||
is_ready = client.validate_workflow("workflow-id")
|
||||
@@ -107,28 +112,118 @@ if is_ready:
|
||||
|
||||
**Returns:** `bool`
|
||||
|
||||
##### execute_workflow_sync()
|
||||
##### get_job_status()
|
||||
|
||||
<Callout type="info">
|
||||
Currently, this method is identical to `execute_workflow()` since all executions are synchronous. This method is provided for future compatibility when asynchronous execution is added.
|
||||
</Callout>
|
||||
|
||||
Execute a workflow (currently synchronous, same as `execute_workflow()`).
Get the status of an async job execution.
|
||||
|
||||
```python
|
||||
result = client.execute_workflow_sync(
|
||||
status = client.get_job_status("task-id-from-async-execution")
|
||||
print("Status:", status["status"]) # 'queued', 'processing', 'completed', 'failed'
|
||||
if status["status"] == "completed":
|
||||
print("Output:", status["output"])
|
||||
```
|
||||
|
||||
**Parameters:**
- `task_id` (str): The task ID returned from async execution

**Returns:** `Dict[str, Any]`
|
||||
|
||||
**Response fields:**
- `success` (bool): Whether the request was successful
- `taskId` (str): The task ID
- `status` (str): One of `'queued'`, `'processing'`, `'completed'`, `'failed'`, `'cancelled'`
- `metadata` (dict): Contains `startedAt`, `completedAt`, and `duration`
- `output` (any, optional): The workflow output (when completed)
- `error` (any, optional): Error details (when failed)
- `estimatedDuration` (int, optional): Estimated duration in milliseconds (when processing/queued)
|
||||
|
||||
##### execute_with_retry()
|
||||
|
||||
Execute a workflow with automatic retry on rate limit errors using exponential backoff.
|
||||
|
||||
```python
|
||||
result = client.execute_with_retry(
|
||||
"workflow-id",
|
||||
input_data={"data": "some input"},
|
||||
timeout=60.0
|
||||
input_data={"message": "Hello"},
|
||||
timeout=30.0,
|
||||
max_retries=3, # Maximum number of retries
|
||||
initial_delay=1.0, # Initial delay in seconds
|
||||
max_delay=30.0, # Maximum delay in seconds
|
||||
backoff_multiplier=2.0 # Exponential backoff multiplier
|
||||
)
|
||||
```
|
||||
|
||||
**Parameters:**
- `workflow_id` (str): The ID of the workflow to execute
- `input_data` (dict, optional): Input data to pass to the workflow
- `timeout` (float): Timeout for the initial request in seconds
- `timeout` (float, optional): Timeout in seconds
- `stream` (bool, optional): Enable streaming responses
- `selected_outputs` (list, optional): Block outputs to stream
- `async_execution` (bool, optional): Execute asynchronously
- `max_retries` (int, optional): Maximum number of retries (default: 3)
- `initial_delay` (float, optional): Initial delay in seconds (default: 1.0)
- `max_delay` (float, optional): Maximum delay in seconds (default: 30.0)
- `backoff_multiplier` (float, optional): Backoff multiplier (default: 2.0)

**Returns:** `WorkflowExecutionResult`
**Returns:** `WorkflowExecutionResult | AsyncExecutionResult`

The retry logic uses exponential backoff (1s → 2s → 4s → 8s...) with ±25% jitter to prevent a thundering herd of simultaneous retries. If the API provides a `retry-after` header, it will be used instead.
|
||||
|
||||
##### get_rate_limit_info()
|
||||
|
||||
Get the current rate limit information from the last API response.
|
||||
|
||||
```python
|
||||
rate_limit_info = client.get_rate_limit_info()
|
||||
if rate_limit_info:
|
||||
print("Limit:", rate_limit_info.limit)
|
||||
print("Remaining:", rate_limit_info.remaining)
|
||||
print("Reset:", datetime.fromtimestamp(rate_limit_info.reset))
|
||||
```
|
||||
|
||||
**Returns:** `RateLimitInfo | None`
|
||||
|
||||
##### get_usage_limits()
|
||||
|
||||
Get current usage limits and quota information for your account.
|
||||
|
||||
```python
|
||||
limits = client.get_usage_limits()
|
||||
print("Sync requests remaining:", limits.rate_limit["sync"]["remaining"])
|
||||
print("Async requests remaining:", limits.rate_limit["async"]["remaining"])
|
||||
print("Current period cost:", limits.usage["currentPeriodCost"])
|
||||
print("Plan:", limits.usage["plan"])
|
||||
```
|
||||
|
||||
**Returns:** `UsageLimits`

**Response structure:**
|
||||
|
||||
```python
|
||||
{
|
||||
"success": bool,
|
||||
"rateLimit": {
|
||||
"sync": {
|
||||
"isLimited": bool,
|
||||
"limit": int,
|
||||
"remaining": int,
|
||||
"resetAt": str
|
||||
},
|
||||
"async": {
|
||||
"isLimited": bool,
|
||||
"limit": int,
|
||||
"remaining": int,
|
||||
"resetAt": str
|
||||
},
|
||||
"authType": str # 'api' or 'manual'
|
||||
},
|
||||
"usage": {
|
||||
"currentPeriodCost": float,
|
||||
"limit": float,
|
||||
"plan": str # e.g., 'free', 'pro'
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
##### set_api_key()
|
||||
|
||||
@@ -170,6 +265,18 @@ class WorkflowExecutionResult:
|
||||
total_duration: Optional[float] = None
|
||||
```
|
||||
|
||||
### AsyncExecutionResult
|
||||
|
||||
```python
|
||||
@dataclass
|
||||
class AsyncExecutionResult:
|
||||
success: bool
|
||||
task_id: str
|
||||
status: str # 'queued'
|
||||
created_at: str
|
||||
links: Dict[str, str] # e.g., {"status": "/api/jobs/{taskId}"}
|
||||
```
|
||||
|
||||
### WorkflowStatus
|
||||
|
||||
```python
|
||||
@@ -181,6 +288,27 @@ class WorkflowStatus:
|
||||
needs_redeployment: bool = False
|
||||
```
|
||||
|
||||
### RateLimitInfo
|
||||
|
||||
```python
|
||||
@dataclass
|
||||
class RateLimitInfo:
|
||||
limit: int
|
||||
remaining: int
|
||||
reset: int
|
||||
retry_after: Optional[int] = None
|
||||
```
|
||||
|
||||
### UsageLimits
|
||||
|
||||
```python
|
||||
@dataclass
|
||||
class UsageLimits:
|
||||
success: bool
|
||||
rate_limit: Dict[str, Any]
|
||||
usage: Dict[str, Any]
|
||||
```
|
||||
|
||||
### SimStudioError
|
||||
|
||||
```python
|
||||
@@ -191,6 +319,13 @@ class SimStudioError(Exception):
|
||||
self.status = status
|
||||
```
|
||||
|
||||
**Common error codes:**
- `UNAUTHORIZED`: Invalid API key
- `TIMEOUT`: Request timed out
- `RATE_LIMIT_EXCEEDED`: Rate limit exceeded
- `USAGE_LIMIT_EXCEEDED`: Usage limit exceeded
- `EXECUTION_ERROR`: Workflow execution failed
|
||||
|
||||
## Examples

### Basic Workflow Execution
|
||||
@@ -205,8 +340,8 @@ class SimStudioError(Exception):
|
||||
<Step title="Ejecutar el flujo de trabajo">
|
||||
Ejecuta el flujo de trabajo con tus datos de entrada.
|
||||
</Step>
|
||||
<Step title="Gestionar el resultado">
|
||||
Procesa el resultado de la ejecución y maneja cualquier error.
|
||||
<Step title="Manejar el resultado">
|
||||
Procesa el resultado de la ejecución y gestiona cualquier error.
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
@@ -214,7 +349,7 @@ class SimStudioError(Exception):
|
||||
import os
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def run_workflow():
|
||||
try:
|
||||
@@ -252,7 +387,7 @@ Maneja diferentes tipos de errores que pueden ocurrir durante la ejecución del
|
||||
from simstudio import SimStudioClient, SimStudioError
|
||||
import os
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_with_error_handling():
|
||||
try:
|
||||
@@ -275,22 +410,22 @@ def execute_with_error_handling():
|
||||
raise
|
||||
```
|
||||
|
||||
### Uso del administrador de contexto
|
||||
### Uso del gestor de contexto
|
||||
|
||||
Usa el cliente como un administrador de contexto para manejar automáticamente la limpieza de recursos:
|
||||
Usa el cliente como un gestor de contexto para manejar automáticamente la limpieza de recursos:
|
||||
|
||||
```python
|
||||
from simstudio import SimStudioClient
|
||||
import os
|
||||
|
||||
# Using context manager to automatically close the session
|
||||
with SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY")) as client:
|
||||
with SimStudioClient(api_key=os.getenv("SIM_API_KEY")) as client:
|
||||
result = client.execute_workflow("workflow-id")
|
||||
print("Result:", result)
|
||||
# Session is automatically closed here
|
||||
```
|
||||
|
||||
### Ejecución por lotes de flujos de trabajo
|
||||
### Ejecución de flujos de trabajo por lotes
|
||||
|
||||
Ejecuta múltiples flujos de trabajo de manera eficiente:
|
||||
|
||||
@@ -298,7 +433,7 @@ Ejecuta múltiples flujos de trabajo de manera eficiente:
|
||||
from simstudio import SimStudioClient
|
||||
import os
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_workflows_batch(workflow_data_pairs):
|
||||
"""Execute multiple workflows with different input data."""
|
||||
@@ -339,6 +474,230 @@ for result in results:
|
||||
print(f"Workflow {result['workflow_id']}: {'Success' if result['success'] else 'Failed'}")
|
||||
```
|
||||
|
||||
### Ejecución asíncrona de flujos de trabajo
|
||||
|
||||
Ejecuta flujos de trabajo de forma asíncrona para tareas de larga duración:
|
||||
|
||||
```python
|
||||
import os
|
||||
import time
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_async():
|
||||
try:
|
||||
# Start async execution
|
||||
result = client.execute_workflow(
|
||||
"workflow-id",
|
||||
input_data={"data": "large dataset"},
|
||||
async_execution=True # Execute asynchronously
|
||||
)
|
||||
|
||||
# Check if result is an async execution
|
||||
if hasattr(result, 'task_id'):
|
||||
print(f"Task ID: {result.task_id}")
|
||||
print(f"Status endpoint: {result.links['status']}")
|
||||
|
||||
# Poll for completion
|
||||
status = client.get_job_status(result.task_id)
|
||||
|
||||
while status["status"] in ["queued", "processing"]:
|
||||
print(f"Current status: {status['status']}")
|
||||
time.sleep(2) # Wait 2 seconds
|
||||
status = client.get_job_status(result.task_id)
|
||||
|
||||
if status["status"] == "completed":
|
||||
print("Workflow completed!")
|
||||
print(f"Output: {status['output']}")
|
||||
print(f"Duration: {status['metadata']['duration']}")
|
||||
else:
|
||||
print(f"Workflow failed: {status['error']}")
|
||||
|
||||
except Exception as error:
|
||||
print(f"Error: {error}")
|
||||
|
||||
execute_async()
|
||||
```
|
||||
|
||||
### Límite de tasa y reintentos
|
||||
|
||||
Maneja los límites de tasa automáticamente con retroceso exponencial:
|
||||
|
||||
```python
|
||||
import os
|
||||
from simstudio import SimStudioClient, SimStudioError
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_with_retry_handling():
|
||||
try:
|
||||
# Automatically retries on rate limit
|
||||
result = client.execute_with_retry(
|
||||
"workflow-id",
|
||||
input_data={"message": "Process this"},
|
||||
max_retries=5,
|
||||
initial_delay=1.0,
|
||||
max_delay=60.0,
|
||||
backoff_multiplier=2.0
|
||||
)
|
||||
|
||||
print(f"Success: {result}")
|
||||
except SimStudioError as error:
|
||||
if error.code == "RATE_LIMIT_EXCEEDED":
|
||||
print("Rate limit exceeded after all retries")
|
||||
|
||||
# Check rate limit info
|
||||
rate_limit_info = client.get_rate_limit_info()
|
||||
if rate_limit_info:
|
||||
from datetime import datetime
|
||||
reset_time = datetime.fromtimestamp(rate_limit_info.reset)
|
||||
print(f"Rate limit resets at: {reset_time}")
|
||||
|
||||
execute_with_retry_handling()
|
||||
```
|
||||
|
||||
### Monitoreo de uso
|
||||
|
||||
Monitorea el uso de tu cuenta y sus límites:
|
||||
|
||||
```python
|
||||
import os
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def check_usage():
|
||||
try:
|
||||
limits = client.get_usage_limits()
|
||||
|
||||
print("=== Rate Limits ===")
|
||||
print("Sync requests:")
|
||||
print(f" Limit: {limits.rate_limit['sync']['limit']}")
|
||||
print(f" Remaining: {limits.rate_limit['sync']['remaining']}")
|
||||
print(f" Resets at: {limits.rate_limit['sync']['resetAt']}")
|
||||
print(f" Is limited: {limits.rate_limit['sync']['isLimited']}")
|
||||
|
||||
print("\nAsync requests:")
|
||||
print(f" Limit: {limits.rate_limit['async']['limit']}")
|
||||
print(f" Remaining: {limits.rate_limit['async']['remaining']}")
|
||||
print(f" Resets at: {limits.rate_limit['async']['resetAt']}")
|
||||
print(f" Is limited: {limits.rate_limit['async']['isLimited']}")
|
||||
|
||||
print("\n=== Usage ===")
|
||||
print(f"Current period cost: ${limits.usage['currentPeriodCost']:.2f}")
|
||||
print(f"Limit: ${limits.usage['limit']:.2f}")
|
||||
print(f"Plan: {limits.usage['plan']}")
|
||||
|
||||
percent_used = (limits.usage['currentPeriodCost'] / limits.usage['limit']) * 100
|
||||
print(f"Usage: {percent_used:.1f}%")
|
||||
|
||||
if percent_used > 80:
|
||||
print("⚠️ Warning: You are approaching your usage limit!")
|
||||
|
||||
except Exception as error:
|
||||
print(f"Error checking usage: {error}")
|
||||
|
||||
check_usage()
|
||||
```
|
||||
|
||||
### Ejecución de flujo de trabajo en streaming
|
||||
|
||||
Ejecuta flujos de trabajo con respuestas en tiempo real:
|
||||
|
||||
```python
|
||||
from simstudio import SimStudioClient
|
||||
import os
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_with_streaming():
|
||||
"""Execute workflow with streaming enabled."""
|
||||
try:
|
||||
# Enable streaming for specific block outputs
|
||||
result = client.execute_workflow(
|
||||
"workflow-id",
|
||||
input_data={"message": "Count to five"},
|
||||
stream=True,
|
||||
selected_outputs=["agent1.content"] # Use blockName.attribute format
|
||||
)
|
||||
|
||||
print("Workflow result:", result)
|
||||
except Exception as error:
|
||||
print("Error:", error)
|
||||
|
||||
execute_with_streaming()
|
||||
```
|
||||
|
||||
La respuesta en streaming sigue el formato de Server-Sent Events (SSE):
|
||||
|
||||
```
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":"One"}
|
||||
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", two"}
|
||||
|
||||
data: {"event":"done","success":true,"output":{},"metadata":{"duration":610}}
|
||||
|
||||
data: [DONE]
|
||||
```
|
||||
|
||||
**Ejemplo de streaming con Flask:**
|
||||
|
||||
```python
|
||||
from flask import Flask, Response, stream_with_context
|
||||
import requests
|
||||
import json
|
||||
import os
|
||||
|
||||
app = Flask(__name__)
|
||||
|
||||
@app.route('/stream-workflow')
|
||||
def stream_workflow():
|
||||
"""Stream workflow execution to the client."""
|
||||
|
||||
def generate():
|
||||
response = requests.post(
|
||||
'https://sim.ai/api/workflows/WORKFLOW_ID/execute',
|
||||
headers={
|
||||
'Content-Type': 'application/json',
|
||||
'X-API-Key': os.getenv('SIM_API_KEY')
|
||||
},
|
||||
json={
|
||||
'message': 'Generate a story',
|
||||
'stream': True,
|
||||
'selectedOutputs': ['agent1.content']
|
||||
},
|
||||
stream=True
|
||||
)
|
||||
|
||||
for line in response.iter_lines():
|
||||
if line:
|
||||
decoded_line = line.decode('utf-8')
|
||||
if decoded_line.startswith('data: '):
|
||||
data = decoded_line[6:] # Remove 'data: ' prefix
|
||||
|
||||
if data == '[DONE]':
|
||||
break
|
||||
|
||||
try:
|
||||
parsed = json.loads(data)
|
||||
if 'chunk' in parsed:
|
||||
yield f"data: {json.dumps(parsed)}\n\n"
|
||||
elif parsed.get('event') == 'done':
|
||||
yield f"data: {json.dumps(parsed)}\n\n"
|
||||
print("Execution complete:", parsed.get('metadata'))
|
||||
except json.JSONDecodeError:
|
||||
pass
|
||||
|
||||
return Response(
|
||||
stream_with_context(generate()),
|
||||
mimetype='text/event-stream'
|
||||
)
|
||||
|
||||
if __name__ == '__main__':
|
||||
app.run(debug=True)
|
||||
```
|
||||
|
||||
### Configuración del entorno
|
||||
|
||||
Configura el cliente usando variables de entorno:
|
||||
@@ -352,8 +711,8 @@ Configura el cliente usando variables de entorno:
|
||||
|
||||
# Development configuration
|
||||
client = SimStudioClient(
|
||||
api_key=os.getenv("SIMSTUDIO_API_KEY"),
|
||||
base_url=os.getenv("SIMSTUDIO_BASE_URL", "https://sim.ai")
|
||||
api_key=os.getenv("SIM_API_KEY")
|
||||
base_url=os.getenv("SIM_BASE_URL", "https://sim.ai")
|
||||
)
|
||||
```
|
||||
|
||||
@@ -365,13 +724,13 @@ Configura el cliente usando variables de entorno:
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
# Production configuration with error handling
|
||||
api_key = os.getenv("SIMSTUDIO_API_KEY")
|
||||
api_key = os.getenv("SIM_API_KEY")
|
||||
if not api_key:
|
||||
raise ValueError("SIMSTUDIO_API_KEY environment variable is required")
|
||||
raise ValueError("SIM_API_KEY environment variable is required")
|
||||
|
||||
client = SimStudioClient(
|
||||
api_key=api_key,
|
||||
base_url=os.getenv("SIMSTUDIO_BASE_URL", "https://sim.ai")
|
||||
base_url=os.getenv("SIM_BASE_URL", "https://sim.ai")
|
||||
)
|
||||
```
|
||||
|
||||
|
||||
@@ -7,10 +7,10 @@ import { Card, Cards } from 'fumadocs-ui/components/card'
|
||||
import { Step, Steps } from 'fumadocs-ui/components/steps'
|
||||
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
|
||||
|
||||
El SDK oficial de TypeScript/JavaScript para Sim proporciona seguridad de tipos completa y es compatible tanto con entornos Node.js como con navegadores, lo que te permite ejecutar flujos de trabajo de forma programática desde tus aplicaciones Node.js, aplicaciones web y otros entornos JavaScript. Todas las ejecuciones de flujos de trabajo son actualmente síncronas.
|
||||
El SDK oficial de TypeScript/JavaScript para Sim proporciona seguridad de tipos completa y es compatible tanto con entornos Node.js como de navegador, lo que te permite ejecutar flujos de trabajo programáticamente desde tus aplicaciones Node.js, aplicaciones web y otros entornos JavaScript.
|
||||
|
||||
<Callout type="info">
|
||||
El SDK de TypeScript proporciona seguridad de tipos completa y es compatible tanto con entornos Node.js como con navegadores. Todas las ejecuciones de flujos de trabajo son actualmente síncronas.
|
||||
El SDK de TypeScript proporciona seguridad de tipos completa, soporte para ejecución asíncrona, limitación automática de velocidad con retroceso exponencial y seguimiento de uso.
|
||||
</Callout>
|
||||
|
||||
## Instalación
|
||||
@@ -95,8 +95,13 @@ const result = await client.executeWorkflow('workflow-id', {
|
||||
- `options` (ExecutionOptions, opcional):
|
||||
- `input` (any): Datos de entrada para pasar al flujo de trabajo
|
||||
- `timeout` (number): Tiempo de espera en milisegundos (predeterminado: 30000)
|
||||
- `stream` (boolean): Habilitar respuestas en streaming (predeterminado: false)
|
||||
- `selectedOutputs` (string[]): Salidas de bloques para transmitir en formato `blockName.attribute` (por ejemplo, `["agent1.content"]`)
|
||||
- `async` (boolean): Ejecutar de forma asíncrona (predeterminado: false)
|
||||
|
||||
**Devuelve:** `Promise<WorkflowExecutionResult>`
|
||||
**Devuelve:** `Promise<WorkflowExecutionResult | AsyncExecutionResult>`
|
||||
|
||||
Cuando `async: true`, devuelve inmediatamente un ID de tarea para sondeo. De lo contrario, espera a que se complete.
|
||||
|
||||
##### getWorkflowStatus()
|
||||
|
||||
@@ -128,32 +133,121 @@ if (isReady) {
|
||||
|
||||
**Devuelve:** `Promise<boolean>`
|
||||
|
||||
##### executeWorkflowSync()
|
||||
##### getJobStatus()
|
||||
|
||||
<Callout type="info">
|
||||
Actualmente, este método es idéntico a `executeWorkflow()` ya que todas las ejecuciones son síncronas. Este método se proporciona para compatibilidad futura cuando se añada la ejecución asíncrona.
|
||||
</Callout>
|
||||
|
||||
Ejecutar un flujo de trabajo (actualmente síncrono, igual que `executeWorkflow()`).
|
||||
Obtener el estado de una ejecución de trabajo asíncrono.
|
||||
|
||||
```typescript
|
||||
const result = await client.executeWorkflowSync('workflow-id', {
|
||||
input: { data: 'some input' },
|
||||
timeout: 60000
|
||||
const status = await client.getJobStatus('task-id-from-async-execution');
|
||||
console.log('Status:', status.status); // 'queued', 'processing', 'completed', 'failed'
|
||||
if (status.status === 'completed') {
|
||||
console.log('Output:', status.output);
|
||||
}
|
||||
```
|
||||
|
||||
**Parámetros:**
|
||||
- `taskId` (string): El ID de tarea devuelto de la ejecución asíncrona
|
||||
|
||||
**Devuelve:** `Promise<JobStatus>`
|
||||
|
||||
**Campos de respuesta:**
|
||||
- `success` (boolean): Si la solicitud fue exitosa
|
||||
- `taskId` (string): El ID de la tarea
|
||||
- `status` (string): Uno de `'queued'`, `'processing'`, `'completed'`, `'failed'`, `'cancelled'`
|
||||
- `metadata` (object): Contiene `startedAt`, `completedAt`, y `duration`
|
||||
- `output` (any, opcional): La salida del flujo de trabajo (cuando se completa)
|
||||
- `error` (any, opcional): Detalles del error (cuando falla)
|
||||
- `estimatedDuration` (number, opcional): Duración estimada en milisegundos (cuando está procesando/en cola)
|
||||
|
||||
##### executeWithRetry()
|
||||
|
||||
Ejecuta un flujo de trabajo con reintento automático en errores de límite de tasa utilizando retroceso exponencial.
|
||||
|
||||
```typescript
|
||||
const result = await client.executeWithRetry('workflow-id', {
|
||||
input: { message: 'Hello' },
|
||||
timeout: 30000
|
||||
}, {
|
||||
maxRetries: 3, // Maximum number of retries
|
||||
initialDelay: 1000, // Initial delay in ms (1 second)
|
||||
maxDelay: 30000, // Maximum delay in ms (30 seconds)
|
||||
backoffMultiplier: 2 // Exponential backoff multiplier
|
||||
});
|
||||
```
|
||||
|
||||
**Parámetros:**
|
||||
- `workflowId` (string): El ID del flujo de trabajo a ejecutar
|
||||
- `options` (ExecutionOptions, opcional):
|
||||
- `input` (any): Datos de entrada para pasar al flujo de trabajo
|
||||
- `timeout` (number): Tiempo de espera para la solicitud inicial en milisegundos
|
||||
- `options` (ExecutionOptions, opcional): Igual que `executeWorkflow()`
|
||||
- `retryOptions` (RetryOptions, opcional):
|
||||
- `maxRetries` (number): Número máximo de reintentos (predeterminado: 3)
|
||||
- `initialDelay` (number): Retraso inicial en ms (predeterminado: 1000)
|
||||
- `maxDelay` (number): Retraso máximo en ms (predeterminado: 30000)
|
||||
- `backoffMultiplier` (number): Multiplicador de retroceso (predeterminado: 2)
|
||||
|
||||
**Devuelve:** `Promise<WorkflowExecutionResult>`
|
||||
**Devuelve:** `Promise<WorkflowExecutionResult | AsyncExecutionResult>`
|
||||
|
||||
La lógica de reintento utiliza retroceso exponencial (1s → 2s → 4s → 8s...) con fluctuación de ±25% para evitar el efecto de manada. Si la API proporciona una cabecera `retry-after`, se utilizará en su lugar.
|
||||
|
||||
##### getRateLimitInfo()
|
||||
|
||||
Obtiene la información actual del límite de tasa de la última respuesta de la API.
|
||||
|
||||
```typescript
|
||||
const rateLimitInfo = client.getRateLimitInfo();
|
||||
if (rateLimitInfo) {
|
||||
console.log('Limit:', rateLimitInfo.limit);
|
||||
console.log('Remaining:', rateLimitInfo.remaining);
|
||||
console.log('Reset:', new Date(rateLimitInfo.reset * 1000));
|
||||
}
|
||||
```
|
||||
|
||||
**Devuelve:** `RateLimitInfo | null`
|
||||
|
||||
##### getUsageLimits()
|
||||
|
||||
Obtiene los límites de uso actuales y la información de cuota para tu cuenta.
|
||||
|
||||
```typescript
|
||||
const limits = await client.getUsageLimits();
|
||||
console.log('Sync requests remaining:', limits.rateLimit.sync.remaining);
|
||||
console.log('Async requests remaining:', limits.rateLimit.async.remaining);
|
||||
console.log('Current period cost:', limits.usage.currentPeriodCost);
|
||||
console.log('Plan:', limits.usage.plan);
|
||||
```
|
||||
|
||||
**Devuelve:** `Promise<UsageLimits>`
|
||||
|
||||
**Estructura de respuesta:**
|
||||
|
||||
```typescript
|
||||
{
|
||||
success: boolean
|
||||
rateLimit: {
|
||||
sync: {
|
||||
isLimited: boolean
|
||||
limit: number
|
||||
remaining: number
|
||||
resetAt: string
|
||||
}
|
||||
async: {
|
||||
isLimited: boolean
|
||||
limit: number
|
||||
remaining: number
|
||||
resetAt: string
|
||||
}
|
||||
authType: string // 'api' or 'manual'
|
||||
}
|
||||
usage: {
|
||||
currentPeriodCost: number
|
||||
limit: number
|
||||
plan: string // e.g., 'free', 'pro'
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
##### setApiKey()
|
||||
|
||||
Actualizar la clave API.
|
||||
Actualiza la clave API.
|
||||
|
||||
```typescript
|
||||
client.setApiKey('new-api-key');
|
||||
@@ -161,7 +255,7 @@ client.setApiKey('new-api-key');
|
||||
|
||||
##### setBaseUrl()
|
||||
|
||||
Actualizar la URL base.
|
||||
Actualiza la URL base.
|
||||
|
||||
```typescript
|
||||
client.setBaseUrl('https://my-custom-domain.com');
|
||||
@@ -187,6 +281,20 @@ interface WorkflowExecutionResult {
|
||||
}
|
||||
```
|
||||
|
||||
### AsyncExecutionResult
|
||||
|
||||
```typescript
|
||||
interface AsyncExecutionResult {
|
||||
success: boolean;
|
||||
taskId: string;
|
||||
status: 'queued';
|
||||
createdAt: string;
|
||||
links: {
|
||||
status: string; // e.g., "/api/jobs/{taskId}"
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
### WorkflowStatus
|
||||
|
||||
```typescript
|
||||
@@ -198,6 +306,45 @@ interface WorkflowStatus {
|
||||
}
|
||||
```
|
||||
|
||||
### RateLimitInfo
|
||||
|
||||
```typescript
|
||||
interface RateLimitInfo {
|
||||
limit: number;
|
||||
remaining: number;
|
||||
reset: number;
|
||||
retryAfter?: number;
|
||||
}
|
||||
```
|
||||
|
||||
### UsageLimits
|
||||
|
||||
```typescript
|
||||
interface UsageLimits {
|
||||
success: boolean;
|
||||
rateLimit: {
|
||||
sync: {
|
||||
isLimited: boolean;
|
||||
limit: number;
|
||||
remaining: number;
|
||||
resetAt: string;
|
||||
};
|
||||
async: {
|
||||
isLimited: boolean;
|
||||
limit: number;
|
||||
remaining: number;
|
||||
resetAt: string;
|
||||
};
|
||||
authType: string;
|
||||
};
|
||||
usage: {
|
||||
currentPeriodCost: number;
|
||||
limit: number;
|
||||
plan: string;
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
### SimStudioError
|
||||
|
||||
```typescript
|
||||
@@ -207,6 +354,13 @@ class SimStudioError extends Error {
|
||||
}
|
||||
```
|
||||
|
||||
**Códigos de error comunes:**
|
||||
- `UNAUTHORIZED`: Clave API inválida
|
||||
- `TIMEOUT`: Tiempo de espera agotado
|
||||
- `RATE_LIMIT_EXCEEDED`: Límite de tasa excedido
|
||||
- `USAGE_LIMIT_EXCEEDED`: Límite de uso excedido
|
||||
- `EXECUTION_ERROR`: Ejecución del flujo de trabajo fallida
|
||||
|
||||
## Ejemplos
|
||||
|
||||
### Ejecución básica de flujo de trabajo
|
||||
@@ -216,13 +370,13 @@ class SimStudioError extends Error {
|
||||
Configura el SimStudioClient con tu clave API.
|
||||
</Step>
|
||||
<Step title="Validar el flujo de trabajo">
|
||||
Comprueba si el flujo de trabajo está implementado y listo para su ejecución.
|
||||
Comprueba si el flujo de trabajo está desplegado y listo para su ejecución.
|
||||
</Step>
|
||||
<Step title="Ejecutar el flujo de trabajo">
|
||||
Ejecuta el flujo de trabajo con tus datos de entrada.
|
||||
</Step>
|
||||
<Step title="Gestionar el resultado">
|
||||
Procesa el resultado de la ejecución y maneja cualquier error.
|
||||
<Step title="Manejar el resultado">
|
||||
Procesa el resultado de la ejecución y gestiona cualquier error.
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
@@ -230,7 +384,7 @@ class SimStudioError extends Error {
|
||||
import { SimStudioClient } from 'simstudio-ts-sdk';
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.SIMSTUDIO_API_KEY!
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
async function runWorkflow() {
|
||||
@@ -271,7 +425,7 @@ Maneja diferentes tipos de errores que pueden ocurrir durante la ejecución del
|
||||
import { SimStudioClient, SimStudioError } from 'simstudio-ts-sdk';
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.SIMSTUDIO_API_KEY!
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
async function executeWithErrorHandling() {
|
||||
@@ -315,14 +469,14 @@ Configura el cliente usando variables de entorno:
|
||||
import { SimStudioClient } from 'simstudio-ts-sdk';
|
||||
|
||||
// Development configuration
|
||||
const apiKey = process.env.SIMSTUDIO_API_KEY;
|
||||
const apiKey = process.env.SIM_API_KEY;
|
||||
if (!apiKey) {
|
||||
throw new Error('SIMSTUDIO_API_KEY environment variable is required');
|
||||
throw new Error('SIM_API_KEY environment variable is required');
|
||||
}
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey,
|
||||
baseUrl: process.env.SIMSTUDIO_BASE_URL // optional
|
||||
baseUrl: process.env.SIM_BASE_URL // optional
|
||||
});
|
||||
```
|
||||
|
||||
@@ -333,14 +487,14 @@ Configura el cliente usando variables de entorno:
|
||||
import { SimStudioClient } from 'simstudio-ts-sdk';
|
||||
|
||||
// Production configuration with validation
|
||||
const apiKey = process.env.SIMSTUDIO_API_KEY;
|
||||
const apiKey = process.env.SIM_API_KEY;
|
||||
if (!apiKey) {
|
||||
throw new Error('SIMSTUDIO_API_KEY environment variable is required');
|
||||
throw new Error('SIM_API_KEY environment variable is required');
|
||||
}
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey,
|
||||
baseUrl: process.env.SIMSTUDIO_BASE_URL || 'https://sim.ai'
|
||||
baseUrl: process.env.SIM_BASE_URL || 'https://sim.ai'
|
||||
});
|
||||
```
|
||||
|
||||
@@ -349,7 +503,7 @@ Configura el cliente usando variables de entorno:
|
||||
|
||||
### Integración con Express de Node.js
|
||||
|
||||
Integración con un servidor Express.js:
|
||||
Integra con un servidor Express.js:
|
||||
|
||||
```typescript
|
||||
import express from 'express';
|
||||
@@ -357,7 +511,7 @@ import { SimStudioClient } from 'simstudio-ts-sdk';
|
||||
|
||||
const app = express();
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.SIMSTUDIO_API_KEY!
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
app.use(express.json());
|
||||
@@ -399,7 +553,7 @@ import { NextApiRequest, NextApiResponse } from 'next';
|
||||
import { SimStudioClient } from 'simstudio-ts-sdk';
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.SIMSTUDIO_API_KEY!
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
export default async function handler(
|
||||
@@ -430,7 +584,7 @@ export default async function handler(
|
||||
|
||||
### Uso del navegador
|
||||
|
||||
Uso en el navegador (con la configuración CORS adecuada):
|
||||
Uso en el navegador (con configuración CORS adecuada):
|
||||
|
||||
```typescript
|
||||
import { SimStudioClient } from 'simstudio-ts-sdk';
|
||||
@@ -469,14 +623,14 @@ document.getElementById('executeBtn')?.addEventListener('click', executeClientSi
|
||||
|
||||
### Ejemplo de hook de React
|
||||
|
||||
Crea un hook personalizado de React para la ejecución del flujo de trabajo:
|
||||
Crea un hook personalizado de React para la ejecución de flujos de trabajo:
|
||||
|
||||
```typescript
|
||||
import { useState, useCallback } from 'react';
|
||||
import { SimStudioClient, WorkflowExecutionResult } from 'simstudio-ts-sdk';
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.NEXT_PUBLIC_SIMSTUDIO_API_KEY!
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
interface UseWorkflowResult {
|
||||
@@ -532,7 +686,7 @@ function WorkflowComponent() {
|
||||
<button onClick={handleExecute} disabled={loading}>
|
||||
{loading ? 'Executing...' : 'Execute Workflow'}
|
||||
</button>
|
||||
|
||||
|
||||
{error && <div>Error: {error.message}</div>}
|
||||
{result && (
|
||||
<div>
|
||||
@@ -545,38 +699,267 @@ function WorkflowComponent() {
|
||||
}
|
||||
```
|
||||
|
||||
## Obtener tu clave API
|
||||
### Ejecución asíncrona de flujos de trabajo
|
||||
|
||||
Ejecuta flujos de trabajo de forma asíncrona para tareas de larga duración:
|
||||
|
||||
```typescript
|
||||
import { SimStudioClient, AsyncExecutionResult } from 'simstudio-ts-sdk';
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
async function executeAsync() {
|
||||
try {
|
||||
// Start async execution
|
||||
const result = await client.executeWorkflow('workflow-id', {
|
||||
input: { data: 'large dataset' },
|
||||
async: true // Execute asynchronously
|
||||
});
|
||||
|
||||
// Check if result is an async execution
|
||||
if ('taskId' in result) {
|
||||
console.log('Task ID:', result.taskId);
|
||||
console.log('Status endpoint:', result.links.status);
|
||||
|
||||
// Poll for completion
|
||||
let status = await client.getJobStatus(result.taskId);
|
||||
|
||||
while (status.status === 'queued' || status.status === 'processing') {
|
||||
console.log('Current status:', status.status);
|
||||
await new Promise(resolve => setTimeout(resolve, 2000)); // Wait 2 seconds
|
||||
status = await client.getJobStatus(result.taskId);
|
||||
}
|
||||
|
||||
if (status.status === 'completed') {
|
||||
console.log('Workflow completed!');
|
||||
console.log('Output:', status.output);
|
||||
console.log('Duration:', status.metadata.duration);
|
||||
} else {
|
||||
console.error('Workflow failed:', status.error);
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
console.error('Error:', error);
|
||||
}
|
||||
}
|
||||
|
||||
executeAsync();
|
||||
```
|
||||
|
||||
### Límite de tasa y reintentos
|
||||
|
||||
Maneja límites de tasa automáticamente con retroceso exponencial:
|
||||
|
||||
```typescript
|
||||
import { SimStudioClient, SimStudioError } from 'simstudio-ts-sdk';
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
async function executeWithRetryHandling() {
|
||||
try {
|
||||
// Automatically retries on rate limit
|
||||
const result = await client.executeWithRetry('workflow-id', {
|
||||
input: { message: 'Process this' }
|
||||
}, {
|
||||
maxRetries: 5,
|
||||
initialDelay: 1000,
|
||||
maxDelay: 60000,
|
||||
backoffMultiplier: 2
|
||||
});
|
||||
|
||||
console.log('Success:', result);
|
||||
} catch (error) {
|
||||
if (error instanceof SimStudioError && error.code === 'RATE_LIMIT_EXCEEDED') {
|
||||
console.error('Rate limit exceeded after all retries');
|
||||
|
||||
// Check rate limit info
|
||||
const rateLimitInfo = client.getRateLimitInfo();
|
||||
if (rateLimitInfo) {
|
||||
console.log('Rate limit resets at:', new Date(rateLimitInfo.reset * 1000));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Monitoreo de uso
|
||||
|
||||
Monitorea el uso de tu cuenta y sus límites:
|
||||
|
||||
```typescript
|
||||
import { SimStudioClient } from 'simstudio-ts-sdk';
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
async function checkUsage() {
|
||||
try {
|
||||
const limits = await client.getUsageLimits();
|
||||
|
||||
console.log('=== Rate Limits ===');
|
||||
console.log('Sync requests:');
|
||||
console.log(' Limit:', limits.rateLimit.sync.limit);
|
||||
console.log(' Remaining:', limits.rateLimit.sync.remaining);
|
||||
console.log(' Resets at:', limits.rateLimit.sync.resetAt);
|
||||
console.log(' Is limited:', limits.rateLimit.sync.isLimited);
|
||||
|
||||
console.log('\nAsync requests:');
|
||||
console.log(' Limit:', limits.rateLimit.async.limit);
|
||||
console.log(' Remaining:', limits.rateLimit.async.remaining);
|
||||
console.log(' Resets at:', limits.rateLimit.async.resetAt);
|
||||
console.log(' Is limited:', limits.rateLimit.async.isLimited);
|
||||
|
||||
console.log('\n=== Usage ===');
|
||||
console.log(`Current period cost: $${limits.usage.currentPeriodCost.toFixed(2)}`);
console.log(`Limit: $${limits.usage.limit.toFixed(2)}`);
console.log('Plan:', limits.usage.plan);

const percentUsed = (limits.usage.currentPeriodCost / limits.usage.limit) * 100;
console.log(`Usage: ${percentUsed.toFixed(1)}%`);

if (percentUsed > 80) {
  console.log('⚠️ Warning: You are approaching your usage limit!');
}
} catch (error) {
  console.error('Error checking usage:', error);
}
}

checkUsage();
```

### Streaming Workflow Execution
|
||||
|
||||
Execute workflows with real-time streaming responses:
|
||||
|
||||
```typescript
|
||||
import { SimStudioClient } from 'simstudio-ts-sdk';
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
async function executeWithStreaming() {
|
||||
try {
|
||||
// Habilita streaming para salidas de bloques específicos
|
||||
const result = await client.executeWorkflow('workflow-id', {
|
||||
input: { message: 'Count to five' },
|
||||
stream: true,
|
||||
selectedOutputs: ['agent1.content'] // Usa el formato blockName.attribute
|
||||
});
|
||||
|
||||
console.log('Resultado del flujo de trabajo:', result);
|
||||
} catch (error) {
|
||||
console.error('Error:', error);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
The streaming response follows the Server-Sent Events (SSE) format:
|
||||
|
||||
```
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":"One"}
|
||||
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", dos"}
|
||||
|
||||
data: {"event":"done","success":true,"output":{},"metadata":{"duration":610}}
|
||||
|
||||
data: [DONE]
|
||||
```
|
||||
|
||||
**React Streaming Example:**
|
||||
|
||||
```typescript
|
||||
import { useState, useEffect } from 'react';
|
||||
|
||||
function StreamingWorkflow() {
|
||||
const [output, setOutput] = useState('');
|
||||
const [loading, setLoading] = useState(false);
|
||||
|
||||
const executeStreaming = async () => {
|
||||
setLoading(true);
|
||||
setOutput('');
|
||||
|
||||
// IMPORTANT: Make this API call from your backend server, not the browser
|
||||
// Never expose your API key in client-side code
|
||||
const response = await fetch('https://sim.ai/api/workflows/WORKFLOW_ID/execute', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'X-API-Key': process.env.SIM_API_KEY! // Server-side environment variable only
|
||||
},
|
||||
body: JSON.stringify({
|
||||
message: 'Generate a story',
|
||||
stream: true,
|
||||
selectedOutputs: ['agent1.content']
|
||||
})
|
||||
});
|
||||
|
||||
const reader = response.body?.getReader();
|
||||
const decoder = new TextDecoder();
|
||||
|
||||
while (reader) {
|
||||
const { done, value } = await reader.read();
|
||||
if (done) break;
|
||||
|
||||
const chunk = decoder.decode(value);
|
||||
const lines = chunk.split('\n\n');
|
||||
|
||||
for (const line of lines) {
|
||||
if (line.startsWith('data: ')) {
|
||||
const data = line.slice(6);
|
||||
if (data === '[DONE]') {
|
||||
setLoading(false);
|
||||
break;
|
||||
}
|
||||
|
||||
try {
|
||||
const parsed = JSON.parse(data);
|
||||
if (parsed.chunk) {
|
||||
setOutput(prev => prev + parsed.chunk);
|
||||
} else if (parsed.event === 'done') {
|
||||
console.log('Execution complete:', parsed.metadata);
|
||||
}
|
||||
} catch (e) {
|
||||
// Skip invalid JSON
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
return (
|
||||
<div>
|
||||
<button onClick={executeStreaming} disabled={loading}>
|
||||
{loading ? 'Generando...' : 'Iniciar streaming'}
|
||||
</button>
|
||||
<div style={{ whiteSpace: 'pre-wrap' }}>{output}</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
```
|
||||
|
||||
## Getting Your API Key
|
||||
|
||||
<Steps>
|
||||
<Step title="Inicia sesión en Sim">
|
||||
Navega a [Sim](https://sim.ai) e inicia sesión en tu cuenta.
|
||||
<Step title="Log in to Sim">
|
||||
Navigate to [Sim](https://sim.ai) and log in to your account.
|
||||
</Step>
|
||||
<Step title="Abre tu flujo de trabajo">
|
||||
Navega al flujo de trabajo que quieres ejecutar programáticamente.
|
||||
<Step title="Open your workflow">
|
||||
Navigate to the workflow you want to execute programmatically.
|
||||
</Step>
|
||||
<Step title="Despliega tu flujo de trabajo">
|
||||
Haz clic en "Deploy" para desplegar tu flujo de trabajo si aún no ha sido desplegado.
|
||||
<Step title="Deploy your workflow">
|
||||
Click on "Deploy" to deploy your workflow if it hasn't been deployed yet.
|
||||
</Step>
|
||||
<Step title="Crea o selecciona una clave API">
|
||||
Durante el proceso de despliegue, selecciona o crea una clave API.
|
||||
<Step title="Create or select an API key">
|
||||
During the deployment process, select or create an API key.
|
||||
</Step>
|
||||
<Step title="Copia la clave API">
|
||||
Copia la clave API para usarla en tu aplicación TypeScript/JavaScript.
|
||||
<Step title="Copy the API key">
|
||||
Copy the API key to use in your TypeScript/JavaScript application.
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
<Callout type="warning">
|
||||
Mantén tu clave API segura y nunca la incluyas en el control de versiones. Usa variables de entorno o gestión de configuración segura.
|
||||
Keep your API key secure and never commit it to version control. Use environment variables or secure configuration management.
|
||||
</Callout>
|
||||
|
||||
## Requisitos
|
||||
## Requirements
|
||||
|
||||
- Node.js 16+
|
||||
- TypeScript 5.0+ (para proyectos TypeScript)
|
||||
- TypeScript 5.0+ (for TypeScript projects)
|
||||
|
||||
## Soporte para TypeScript
|
||||
## TypeScript Support
|
||||
|
||||
El SDK está escrito en TypeScript y proporciona seguridad de tipos completa:
|
||||
The SDK is written in TypeScript and provides full type safety:
|
||||
|
||||
```typescript
|
||||
import {
|
||||
@@ -588,13 +971,13 @@ import {
|
||||
|
||||
// Type-safe client initialization
|
||||
const client: SimStudioClient = new SimStudioClient({
|
||||
apiKey: process.env.SIMSTUDIO_API_KEY!
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
// Type-safe workflow execution
|
||||
const result: WorkflowExecutionResult = await client.executeWorkflow('workflow-id', {
|
||||
input: {
|
||||
message: 'Hello, TypeScript!'
|
||||
message: '¡Hola, TypeScript!'
|
||||
}
|
||||
});
|
||||
|
||||
@@ -602,6 +985,7 @@ const result: WorkflowExecutionResult = await client.executeWorkflow('workflow-i
|
||||
const status: WorkflowStatus = await client.getWorkflowStatus('workflow-id');
|
||||
```
|
||||
|
||||
## Licencia
|
||||
## License
|
||||
|
||||
Apache-2.0
|
||||
|
||||
Apache-2.0
|
||||
|
||||
@@ -38,15 +38,93 @@ curl -X POST \
|
||||
|
||||
Las respuestas exitosas devuelven el resultado de ejecución serializado del Ejecutor. Los errores muestran fallos de validación, autenticación o flujo de trabajo.
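
A modo de ilustración, un esbozo mínimo en Python (con `requests`) que llama al endpoint de ejecución y distingue una respuesta exitosa de un error; el ID del flujo de trabajo es ficticio y la forma exacta del cuerpo de error puede variar según el tipo de fallo:

```python
import os

import requests

API_KEY = os.getenv("SIM_API_KEY")
WORKFLOW_ID = "WORKFLOW_ID"  # sustitúyelo por el ID de tu flujo de trabajo

response = requests.post(
    f"https://sim.ai/api/workflows/{WORKFLOW_ID}/execute",
    headers={"Content-Type": "application/json", "X-API-Key": API_KEY},
    json={"message": "Hello"},
    timeout=30,
)

if response.ok:
    # Resultado de ejecución serializado devuelto por el Ejecutor
    print(response.json())
else:
    # Fallo de validación, autenticación o del flujo de trabajo
    print(response.status_code, response.text)
```
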
## Respuestas en streaming
|
||||
|
||||
Habilita el streaming en tiempo real para recibir la salida del flujo de trabajo a medida que se genera, carácter por carácter. Esto es útil para mostrar las respuestas de IA progresivamente a los usuarios.
|
||||
|
||||
### Parámetros de solicitud
|
||||
|
||||
Añade estos parámetros para habilitar el streaming:
|
||||
|
||||
- `stream` - Establece a `true` para habilitar el streaming de eventos enviados por el servidor (SSE)
|
||||
- `selectedOutputs` - Array de salidas de bloques para transmitir (p. ej., `["agent1.content"]`)
|
||||
|
||||
### Formato de salida de bloque
|
||||
|
||||
Usa el formato `blockName.attribute` para especificar qué salidas de bloques transmitir:
|
||||
- Formato: `"blockName.attribute"` (p. ej., si quieres transmitir el contenido del bloque Agente 1, usarías `"agent1.content"`)
|
||||
- Los nombres de los bloques no distinguen entre mayúsculas y minúsculas y se ignoran los espacios
|
||||
|
||||
### Ejemplo de solicitud
|
||||
|
||||
```bash
|
||||
curl -X POST \
|
||||
https://sim.ai/api/workflows/WORKFLOW_ID/execute \
|
||||
-H 'Content-Type: application/json' \
|
||||
-H 'X-API-Key: YOUR_KEY' \
|
||||
-d '{
|
||||
"message": "Count to five",
|
||||
"stream": true,
|
||||
"selectedOutputs": ["agent1.content"]
|
||||
}'
|
||||
```
|
||||
|
||||
### Formato de respuesta
|
||||
|
||||
Las respuestas en streaming utilizan el formato de eventos enviados por el servidor (SSE):
|
||||
|
||||
```
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":"One"}
|
||||
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", two"}
|
||||
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", three"}
|
||||
|
||||
data: {"event":"done","success":true,"output":{},"metadata":{"duration":610}}
|
||||
|
||||
data: [DONE]
|
||||
```
|
||||
|
||||
Cada evento incluye (más abajo se muestra un esbozo de cómo consumirlos):
|
||||
- **Fragmentos de streaming**: `{"blockId": "...", "chunk": "text"}` - Texto en tiempo real a medida que se genera
|
||||
- **Evento final**: `{"event": "done", ...}` - Metadatos de ejecución y resultados completos
|
||||
- **Terminador**: `[DONE]` - Señala el fin del stream
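
A modo de referencia, un esbozo mínimo en Python que consume estos eventos con `requests`; el ID del flujo de trabajo y la salida seleccionada son ilustrativos:

```python
import json
import os

import requests

response = requests.post(
    "https://sim.ai/api/workflows/WORKFLOW_ID/execute",
    headers={"Content-Type": "application/json", "X-API-Key": os.getenv("SIM_API_KEY")},
    json={"message": "Count to five", "stream": True, "selectedOutputs": ["agent1.content"]},
    stream=True,
)

for line in response.iter_lines():
    if not line or not line.startswith(b"data: "):
        continue
    data = line[len(b"data: "):].decode("utf-8")
    if data == "[DONE]":  # terminador: fin del stream
        break
    event = json.loads(data)
    if "chunk" in event:  # fragmento de streaming
        print(event["chunk"], end="", flush=True)
    elif event.get("event") == "done":  # evento final con metadatos
        print("\n", event.get("metadata"))
```
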
### Streaming de múltiples bloques
|
||||
|
||||
Cuando `selectedOutputs` incluye múltiples bloques, cada fragmento indica qué bloque lo produjo:
|
||||
|
||||
```bash
|
||||
curl -X POST \
|
||||
https://sim.ai/api/workflows/WORKFLOW_ID/execute \
|
||||
-H 'Content-Type: application/json' \
|
||||
-H 'X-API-Key: YOUR_KEY' \
|
||||
-d '{
|
||||
"message": "Process this request",
|
||||
"stream": true,
|
||||
"selectedOutputs": ["agent1.content", "agent2.content"]
|
||||
}'
|
||||
```
|
||||
|
||||
El campo `blockId` en cada fragmento te permite dirigir la salida al elemento de UI correcto:
|
||||
|
||||
```
|
||||
data: {"blockId":"agent1-uuid","chunk":"Processing..."}
|
||||
|
||||
data: {"blockId":"agent2-uuid","chunk":"Analyzing..."}
|
||||
|
||||
data: {"blockId":"agent1-uuid","chunk":" complete"}
|
||||
```
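
A modo de ejemplo, un esbozo mínimo que acumula los fragmentos por `blockId` (los identificadores y fragmentos son los del ejemplo anterior):

```python
# Fragmentos ya extraídos de las líneas "data: ..." del stream SSE
events = [
    {"blockId": "agent1-uuid", "chunk": "Processing..."},
    {"blockId": "agent2-uuid", "chunk": "Analyzing..."},
    {"blockId": "agent1-uuid", "chunk": " complete"},
]

outputs = {}
for event in events:
    outputs[event["blockId"]] = outputs.get(event["blockId"], "") + event["chunk"]

print(outputs)
# {'agent1-uuid': 'Processing... complete', 'agent2-uuid': 'Analyzing...'}
```
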
## Referencia de salida
|
||||
|
||||
| Referencia | Descripción |
|
||||
|-----------|-------------|
|
||||
| `<api.field>` | Campo definido en el Formato de Entrada |
|
||||
| `<api.input>` | Cuerpo completo estructurado de la solicitud |
|
||||
| `<api.field>` | Campo definido en el formato de entrada |
|
||||
| `<api.input>` | Cuerpo de solicitud estructurado completo |
|
||||
|
||||
Si no se define un Formato de Entrada, el ejecutor expone el JSON sin procesar solo en `<api.input>`.
|
||||
Si no se define un formato de entrada, el ejecutor expone el JSON sin procesar solo en `<api.input>`.
|
||||
|
||||
<Callout type="warning">
|
||||
Un flujo de trabajo puede contener solo un Disparador de API. Publica una nueva implementación después de realizar cambios para que el punto de conexión se mantenga actualizado.
|
||||
Un flujo de trabajo puede contener solo un disparador de API. Publica una nueva implementación después de los cambios para que el endpoint se mantenga actualizado.
|
||||
</Callout>
|
||||
|
||||
@@ -10,7 +10,7 @@ import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
|
||||
Le SDK Python officiel pour Sim vous permet d'exécuter des workflows de manière programmatique à partir de vos applications Python.
|
||||
|
||||
<Callout type="info">
|
||||
Le SDK Python prend en charge Python 3.8+ et fournit une exécution synchrone des workflows. Toutes les exécutions de workflow sont actuellement synchrones.
|
||||
Le SDK Python prend en charge Python 3.8+ avec support d'exécution asynchrone, limitation automatique du débit avec backoff exponentiel, et suivi d'utilisation.
|
||||
</Callout>
|
||||
|
||||
## Installation
|
||||
@@ -71,11 +71,16 @@ result = client.execute_workflow(
|
||||
```
|
||||
|
||||
**Paramètres :**
|
||||
- `workflow_id` (str) : L'ID du workflow à exécuter
|
||||
- `workflow_id` (str) : L'identifiant du workflow à exécuter
|
||||
- `input_data` (dict, facultatif) : Données d'entrée à transmettre au workflow
|
||||
- `timeout` (float, facultatif) : Délai d'attente en secondes (par défaut : 30.0)
|
||||
- `timeout` (float, facultatif) : Délai d'expiration en secondes (par défaut : 30.0)
|
||||
- `stream` (bool, facultatif) : Activer les réponses en streaming (par défaut : False)
|
||||
- `selected_outputs` (list[str], facultatif) : Sorties de blocs à diffuser au format `blockName.attribute` (par exemple, `["agent1.content"]`)
|
||||
- `async_execution` (bool, facultatif) : Exécuter de manière asynchrone (par défaut : False)
|
||||
|
||||
**Retourne :** `WorkflowExecutionResult`
|
||||
**Retourne :** `WorkflowExecutionResult | AsyncExecutionResult`
|
||||
|
||||
Lorsque `async_execution=True`, retourne immédiatement un identifiant de tâche pour l'interrogation. Sinon, attend la fin de l'exécution.
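
À titre d'illustration, une courte esquisse (avec un identifiant de workflow fictif) combinant ces paramètres :

```python
import os

from simstudio import SimStudioClient

client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))

# Exécution synchrone avec streaming de la sortie d'un bloc
result = client.execute_workflow(
    "workflow-id",
    input_data={"message": "Count to five"},
    stream=True,
    selected_outputs=["agent1.content"],
)

# Exécution asynchrone : retourne immédiatement un identifiant de tâche
job = client.execute_workflow(
    "workflow-id",
    input_data={"data": "large dataset"},
    async_execution=True,
)
print(job.task_id)  # à interroger ensuite avec get_job_status()
```
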
##### get_workflow_status()
|
||||
|
||||
@@ -87,13 +92,13 @@ print("Is deployed:", status.is_deployed)
|
||||
```
|
||||
|
||||
**Paramètres :**
|
||||
- `workflow_id` (str) : L'ID du workflow
|
||||
- `workflow_id` (str) : L'identifiant du workflow
|
||||
|
||||
**Retourne :** `WorkflowStatus`
|
||||
|
||||
##### validate_workflow()
|
||||
|
||||
Valide qu'un workflow est prêt pour l'exécution.
|
||||
Valider qu'un workflow est prêt pour l'exécution.
|
||||
|
||||
```python
|
||||
is_ready = client.validate_workflow("workflow-id")
|
||||
@@ -107,32 +112,122 @@ if is_ready:
|
||||
|
||||
**Retourne :** `bool`
|
||||
|
||||
##### execute_workflow_sync()
|
||||
##### get_job_status()
|
||||
|
||||
<Callout type="info">
|
||||
Actuellement, cette méthode est identique à `execute_workflow()` puisque toutes les exécutions sont synchrones. Cette méthode est fournie pour une compatibilité future lorsque l'exécution asynchrone sera ajoutée.
|
||||
</Callout>
|
||||
|
||||
Exécute un workflow (actuellement synchrone, identique à `execute_workflow()`).
|
||||
Obtenir le statut d'une exécution de tâche asynchrone.
|
||||
|
||||
```python
|
||||
result = client.execute_workflow_sync(
|
||||
status = client.get_job_status("task-id-from-async-execution")
|
||||
print("Status:", status["status"]) # 'queued', 'processing', 'completed', 'failed'
|
||||
if status["status"] == "completed":
|
||||
print("Output:", status["output"])
|
||||
```
|
||||
|
||||
**Paramètres :**
|
||||
- `task_id` (str) : L'identifiant de tâche retourné par l'exécution asynchrone
|
||||
|
||||
**Retourne :** `Dict[str, Any]`
|
||||
|
||||
**Champs de réponse :**
|
||||
- `success` (bool) : Si la requête a réussi
|
||||
- `taskId` (str) : L'identifiant de la tâche
|
||||
- `status` (str) : L'un des états suivants : `'queued'`, `'processing'`, `'completed'`, `'failed'`, `'cancelled'`
|
||||
- `metadata` (dict) : Contient `startedAt`, `completedAt`, et `duration`
|
||||
- `output` (any, facultatif) : La sortie du workflow (une fois terminé)
|
||||
- `error` (any, facultatif) : Détails de l'erreur (en cas d'échec)
|
||||
- `estimatedDuration` (int, facultatif) : Durée estimée en millisecondes (lors du traitement/mise en file d'attente)
|
||||
|
||||
##### execute_with_retry()
|
||||
|
||||
Exécuter un workflow avec réessai automatique en cas d'erreurs de limitation de débit, en utilisant un backoff exponentiel.
|
||||
|
||||
```python
|
||||
result = client.execute_with_retry(
|
||||
"workflow-id",
|
||||
input_data={"data": "some input"},
|
||||
timeout=60.0
|
||||
input_data={"message": "Hello"},
|
||||
timeout=30.0,
|
||||
max_retries=3, # Maximum number of retries
|
||||
initial_delay=1.0, # Initial delay in seconds
|
||||
max_delay=30.0, # Maximum delay in seconds
|
||||
backoff_multiplier=2.0 # Exponential backoff multiplier
|
||||
)
|
||||
```
|
||||
|
||||
**Paramètres :**
|
||||
- `workflow_id` (str) : L'identifiant du workflow à exécuter
|
||||
- `input_data` (dict, facultatif) : Données d'entrée à transmettre au workflow
|
||||
- `timeout` (float) : Délai d'attente pour la requête initiale en secondes
|
||||
- `timeout` (float, facultatif) : Délai d'expiration en secondes
|
||||
- `stream` (bool, facultatif) : Activer les réponses en streaming
|
||||
- `selected_outputs` (list, facultatif) : Sorties de blocs à diffuser
|
||||
- `async_execution` (bool, facultatif) : Exécuter de manière asynchrone
|
||||
- `max_retries` (int, facultatif) : Nombre maximum de tentatives (par défaut : 3)
|
||||
- `initial_delay` (float, facultatif) : Délai initial en secondes (par défaut : 1.0)
|
||||
- `max_delay` (float, facultatif) : Délai maximum en secondes (par défaut : 30.0)
|
||||
- `backoff_multiplier` (float, facultatif) : Multiplicateur de backoff (par défaut : 2.0)
|
||||
|
||||
**Retourne :** `WorkflowExecutionResult`
|
||||
**Retourne :** `WorkflowExecutionResult | AsyncExecutionResult`
|
||||
|
||||
La logique de nouvelle tentative utilise un backoff exponentiel (1s → 2s → 4s → 8s...) avec une variation aléatoire de ±25% pour éviter l'effet de horde. Si l'API fournit un en-tête `retry-after`, celui-ci sera utilisé à la place.
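
Pour fixer les idées, voici une petite esquisse (simplifiée, ce n'est pas le code du SDK) du calcul du délai avec backoff exponentiel et variation aléatoire de ±25 % :

```python
import random

def retry_delay(attempt, initial_delay=1.0, max_delay=30.0, backoff_multiplier=2.0):
    """Délai avant la tentative `attempt` (0, 1, 2, ...), en secondes."""
    delay = min(initial_delay * (backoff_multiplier ** attempt), max_delay)
    jitter = random.uniform(-0.25, 0.25)  # variation aléatoire de ±25 %
    return delay * (1 + jitter)

# 1 s -> 2 s -> 4 s -> 8 s ... (±25 %), plafonné à max_delay
for attempt in range(5):
    print(round(retry_delay(attempt), 2))
```
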
##### get_rate_limit_info()
|
||||
|
||||
Obtenir les informations actuelles sur les limites de débit à partir de la dernière réponse de l'API.
|
||||
|
||||
```python
|
||||
rate_limit_info = client.get_rate_limit_info()
|
||||
if rate_limit_info:
|
||||
print("Limit:", rate_limit_info.limit)
|
||||
print("Remaining:", rate_limit_info.remaining)
|
||||
print("Reset:", datetime.fromtimestamp(rate_limit_info.reset))
|
||||
```
|
||||
|
||||
**Retourne :** `RateLimitInfo | None`
|
||||
|
||||
##### get_usage_limits()
|
||||
|
||||
Obtenir les limites d'utilisation actuelles et les informations de quota pour votre compte.
|
||||
|
||||
```python
|
||||
limits = client.get_usage_limits()
|
||||
print("Sync requests remaining:", limits.rate_limit["sync"]["remaining"])
|
||||
print("Async requests remaining:", limits.rate_limit["async"]["remaining"])
|
||||
print("Current period cost:", limits.usage["currentPeriodCost"])
|
||||
print("Plan:", limits.usage["plan"])
|
||||
```
|
||||
|
||||
**Retourne :** `UsageLimits`
|
||||
|
||||
**Structure de la réponse :**
|
||||
|
||||
```python
|
||||
{
|
||||
"success": bool,
|
||||
"rateLimit": {
|
||||
"sync": {
|
||||
"isLimited": bool,
|
||||
"limit": int,
|
||||
"remaining": int,
|
||||
"resetAt": str
|
||||
},
|
||||
"async": {
|
||||
"isLimited": bool,
|
||||
"limit": int,
|
||||
"remaining": int,
|
||||
"resetAt": str
|
||||
},
|
||||
"authType": str # 'api' or 'manual'
|
||||
},
|
||||
"usage": {
|
||||
"currentPeriodCost": float,
|
||||
"limit": float,
|
||||
"plan": str # e.g., 'free', 'pro'
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
##### set_api_key()
|
||||
|
||||
Met à jour la clé API.
|
||||
Mettre à jour la clé API.
|
||||
|
||||
```python
|
||||
client.set_api_key("new-api-key")
|
||||
@@ -140,7 +235,7 @@ client.set_api_key("new-api-key")
|
||||
|
||||
##### set_base_url()
|
||||
|
||||
Met à jour l'URL de base.
|
||||
Mettre à jour l'URL de base.
|
||||
|
||||
```python
|
||||
client.set_base_url("https://my-custom-domain.com")
|
||||
@@ -148,7 +243,7 @@ client.set_base_url("https://my-custom-domain.com")
|
||||
|
||||
##### close()
|
||||
|
||||
Ferme la session HTTP sous-jacente.
|
||||
Fermer la session HTTP sous-jacente.
|
||||
|
||||
```python
|
||||
client.close()
|
||||
@@ -170,6 +265,18 @@ class WorkflowExecutionResult:
|
||||
total_duration: Optional[float] = None
|
||||
```
|
||||
|
||||
### AsyncExecutionResult
|
||||
|
||||
```python
|
||||
@dataclass
|
||||
class AsyncExecutionResult:
|
||||
success: bool
|
||||
task_id: str
|
||||
status: str # 'queued'
|
||||
created_at: str
|
||||
links: Dict[str, str] # e.g., {"status": "/api/jobs/{taskId}"}
|
||||
```
|
||||
|
||||
### WorkflowStatus
|
||||
|
||||
```python
|
||||
@@ -181,6 +288,27 @@ class WorkflowStatus:
|
||||
needs_redeployment: bool = False
|
||||
```
|
||||
|
||||
### RateLimitInfo
|
||||
|
||||
```python
|
||||
@dataclass
|
||||
class RateLimitInfo:
|
||||
limit: int
|
||||
remaining: int
|
||||
reset: int
|
||||
retry_after: Optional[int] = None
|
||||
```
|
||||
|
||||
### UsageLimits
|
||||
|
||||
```python
|
||||
@dataclass
|
||||
class UsageLimits:
|
||||
success: bool
|
||||
rate_limit: Dict[str, Any]
|
||||
usage: Dict[str, Any]
|
||||
```
|
||||
|
||||
### SimStudioError
|
||||
|
||||
```python
|
||||
@@ -191,19 +319,26 @@ class SimStudioError(Exception):
|
||||
self.status = status
|
||||
```
|
||||
|
||||
**Codes d'erreur courants :**
|
||||
- `UNAUTHORIZED` : Clé API invalide
|
||||
- `TIMEOUT` : Délai d'attente de la requête dépassé
|
||||
- `RATE_LIMIT_EXCEEDED` : Limite de débit dépassée
|
||||
- `USAGE_LIMIT_EXCEEDED` : Limite d'utilisation dépassée
|
||||
- `EXECUTION_ERROR` : Échec de l'exécution du workflow
|
||||
|
||||
## Exemples
|
||||
|
||||
### Exécution de flux de travail basique
|
||||
### Exécution basique d'un workflow
|
||||
|
||||
<Steps>
|
||||
<Step title="Initialiser le client">
|
||||
Configurez le SimStudioClient avec votre clé API.
|
||||
</Step>
|
||||
<Step title="Valider le flux de travail">
|
||||
Vérifiez si le flux de travail est déployé et prêt pour l'exécution.
|
||||
<Step title="Valider le workflow">
|
||||
Vérifiez si le workflow est déployé et prêt pour l'exécution.
|
||||
</Step>
|
||||
<Step title="Exécuter le flux de travail">
|
||||
Lancez le flux de travail avec vos données d'entrée.
|
||||
<Step title="Exécuter le workflow">
|
||||
Lancez le workflow avec vos données d'entrée.
|
||||
</Step>
|
||||
<Step title="Gérer le résultat">
|
||||
Traitez le résultat de l'exécution et gérez les éventuelles erreurs.
|
||||
@@ -214,7 +349,7 @@ class SimStudioError(Exception):
|
||||
import os
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def run_workflow():
|
||||
try:
|
||||
@@ -246,13 +381,13 @@ run_workflow()
|
||||
|
||||
### Gestion des erreurs
|
||||
|
||||
Gérez différents types d'erreurs qui peuvent survenir pendant l'exécution du flux de travail :
|
||||
Gérez différents types d'erreurs qui peuvent survenir pendant l'exécution du workflow :
|
||||
|
||||
```python
|
||||
from simstudio import SimStudioClient, SimStudioError
|
||||
import os
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_with_error_handling():
|
||||
try:
|
||||
@@ -284,21 +419,21 @@ from simstudio import SimStudioClient
|
||||
import os
|
||||
|
||||
# Using context manager to automatically close the session
|
||||
with SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY")) as client:
|
||||
with SimStudioClient(api_key=os.getenv("SIM_API_KEY")) as client:
|
||||
result = client.execute_workflow("workflow-id")
|
||||
print("Result:", result)
|
||||
# Session is automatically closed here
|
||||
```
|
||||
|
||||
### Exécution de flux de travail par lots
|
||||
### Exécution de workflows par lots
|
||||
|
||||
Exécutez plusieurs flux de travail efficacement :
|
||||
Exécutez plusieurs workflows efficacement :
|
||||
|
||||
```python
|
||||
from simstudio import SimStudioClient
|
||||
import os
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_workflows_batch(workflow_data_pairs):
|
||||
"""Execute multiple workflows with different input data."""
|
||||
@@ -339,6 +474,230 @@ for result in results:
|
||||
print(f"Workflow {result['workflow_id']}: {'Success' if result['success'] else 'Failed'}")
|
||||
```
|
||||
|
||||
### Exécution asynchrone de workflow
|
||||
|
||||
Exécutez des workflows de manière asynchrone pour les tâches de longue durée :
|
||||
|
||||
```python
|
||||
import os
|
||||
import time
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_async():
|
||||
try:
|
||||
# Start async execution
|
||||
result = client.execute_workflow(
|
||||
"workflow-id",
|
||||
input_data={"data": "large dataset"},
|
||||
async_execution=True # Execute asynchronously
|
||||
)
|
||||
|
||||
# Check if result is an async execution
|
||||
if hasattr(result, 'task_id'):
|
||||
print(f"Task ID: {result.task_id}")
|
||||
print(f"Status endpoint: {result.links['status']}")
|
||||
|
||||
# Poll for completion
|
||||
status = client.get_job_status(result.task_id)
|
||||
|
||||
while status["status"] in ["queued", "processing"]:
|
||||
print(f"Current status: {status['status']}")
|
||||
time.sleep(2) # Wait 2 seconds
|
||||
status = client.get_job_status(result.task_id)
|
||||
|
||||
if status["status"] == "completed":
|
||||
print("Workflow completed!")
|
||||
print(f"Output: {status['output']}")
|
||||
print(f"Duration: {status['metadata']['duration']}")
|
||||
else:
|
||||
print(f"Workflow failed: {status['error']}")
|
||||
|
||||
except Exception as error:
|
||||
print(f"Error: {error}")
|
||||
|
||||
execute_async()
|
||||
```
|
||||
|
||||
### Limitation de débit et nouvelle tentative
|
||||
|
||||
Gérez les limites de débit automatiquement avec un backoff exponentiel :
|
||||
|
||||
```python
|
||||
import os
|
||||
from simstudio import SimStudioClient, SimStudioError
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_with_retry_handling():
|
||||
try:
|
||||
# Automatically retries on rate limit
|
||||
result = client.execute_with_retry(
|
||||
"workflow-id",
|
||||
input_data={"message": "Process this"},
|
||||
max_retries=5,
|
||||
initial_delay=1.0,
|
||||
max_delay=60.0,
|
||||
backoff_multiplier=2.0
|
||||
)
|
||||
|
||||
print(f"Success: {result}")
|
||||
except SimStudioError as error:
|
||||
if error.code == "RATE_LIMIT_EXCEEDED":
|
||||
print("Rate limit exceeded after all retries")
|
||||
|
||||
# Check rate limit info
|
||||
rate_limit_info = client.get_rate_limit_info()
|
||||
if rate_limit_info:
|
||||
from datetime import datetime
|
||||
reset_time = datetime.fromtimestamp(rate_limit_info.reset)
|
||||
print(f"Rate limit resets at: {reset_time}")
|
||||
|
||||
execute_with_retry_handling()
|
||||
```
|
||||
|
||||
### Surveillance de l'utilisation
|
||||
|
||||
Surveillez l'utilisation et les limites de votre compte :
|
||||
|
||||
```python
|
||||
import os
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def check_usage():
|
||||
try:
|
||||
limits = client.get_usage_limits()
|
||||
|
||||
print("=== Rate Limits ===")
|
||||
print("Sync requests:")
|
||||
print(f" Limit: {limits.rate_limit['sync']['limit']}")
|
||||
print(f" Remaining: {limits.rate_limit['sync']['remaining']}")
|
||||
print(f" Resets at: {limits.rate_limit['sync']['resetAt']}")
|
||||
print(f" Is limited: {limits.rate_limit['sync']['isLimited']}")
|
||||
|
||||
print("\nAsync requests:")
|
||||
print(f" Limit: {limits.rate_limit['async']['limit']}")
|
||||
print(f" Remaining: {limits.rate_limit['async']['remaining']}")
|
||||
print(f" Resets at: {limits.rate_limit['async']['resetAt']}")
|
||||
print(f" Is limited: {limits.rate_limit['async']['isLimited']}")
|
||||
|
||||
print("\n=== Usage ===")
|
||||
print(f"Current period cost: ${limits.usage['currentPeriodCost']:.2f}")
|
||||
print(f"Limit: ${limits.usage['limit']:.2f}")
|
||||
print(f"Plan: {limits.usage['plan']}")
|
||||
|
||||
percent_used = (limits.usage['currentPeriodCost'] / limits.usage['limit']) * 100
|
||||
print(f"Usage: {percent_used:.1f}%")
|
||||
|
||||
if percent_used > 80:
|
||||
print("⚠️ Warning: You are approaching your usage limit!")
|
||||
|
||||
except Exception as error:
|
||||
print(f"Error checking usage: {error}")
|
||||
|
||||
check_usage()
|
||||
```
|
||||
|
||||
### Exécution de workflow en streaming
|
||||
|
||||
Exécutez des workflows avec des réponses en streaming en temps réel :
|
||||
|
||||
```python
|
||||
from simstudio import SimStudioClient
|
||||
import os
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_with_streaming():
|
||||
"""Execute workflow with streaming enabled."""
|
||||
try:
|
||||
# Enable streaming for specific block outputs
|
||||
result = client.execute_workflow(
|
||||
"workflow-id",
|
||||
input_data={"message": "Count to five"},
|
||||
stream=True,
|
||||
selected_outputs=["agent1.content"] # Use blockName.attribute format
|
||||
)
|
||||
|
||||
print("Workflow result:", result)
|
||||
except Exception as error:
|
||||
print("Error:", error)
|
||||
|
||||
execute_with_streaming()
|
||||
```
|
||||
|
||||
La réponse en streaming suit le format Server-Sent Events (SSE) :
|
||||
|
||||
```
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":"One"}
|
||||
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", two"}
|
||||
|
||||
data: {"event":"done","success":true,"output":{},"metadata":{"duration":610}}
|
||||
|
||||
data: [DONE]
|
||||
```
|
||||
|
||||
**Exemple de streaming avec Flask :**
|
||||
|
||||
```python
|
||||
from flask import Flask, Response, stream_with_context
|
||||
import requests
|
||||
import json
|
||||
import os
|
||||
|
||||
app = Flask(__name__)
|
||||
|
||||
@app.route('/stream-workflow')
|
||||
def stream_workflow():
|
||||
"""Stream workflow execution to the client."""
|
||||
|
||||
def generate():
|
||||
response = requests.post(
|
||||
'https://sim.ai/api/workflows/WORKFLOW_ID/execute',
|
||||
headers={
|
||||
'Content-Type': 'application/json',
|
||||
'X-API-Key': os.getenv('SIM_API_KEY')
|
||||
},
|
||||
json={
|
||||
'message': 'Generate a story',
|
||||
'stream': True,
|
||||
'selectedOutputs': ['agent1.content']
|
||||
},
|
||||
stream=True
|
||||
)
|
||||
|
||||
for line in response.iter_lines():
|
||||
if line:
|
||||
decoded_line = line.decode('utf-8')
|
||||
if decoded_line.startswith('data: '):
|
||||
data = decoded_line[6:] # Remove 'data: ' prefix
|
||||
|
||||
if data == '[DONE]':
|
||||
break
|
||||
|
||||
try:
|
||||
parsed = json.loads(data)
|
||||
if 'chunk' in parsed:
|
||||
yield f"data: {json.dumps(parsed)}\n\n"
|
||||
elif parsed.get('event') == 'done':
|
||||
yield f"data: {json.dumps(parsed)}\n\n"
|
||||
print("Execution complete:", parsed.get('metadata'))
|
||||
except json.JSONDecodeError:
|
||||
pass
|
||||
|
||||
return Response(
|
||||
stream_with_context(generate()),
|
||||
mimetype='text/event-stream'
|
||||
)
|
||||
|
||||
if __name__ == '__main__':
|
||||
app.run(debug=True)
|
||||
```
|
||||
|
||||
### Configuration de l'environnement
|
||||
|
||||
Configurez le client en utilisant des variables d'environnement :
|
||||
@@ -352,8 +711,8 @@ Configurez le client en utilisant des variables d'environnement :
|
||||
|
||||
# Development configuration
|
||||
client = SimStudioClient(
|
||||
api_key=os.getenv("SIMSTUDIO_API_KEY"),
|
||||
base_url=os.getenv("SIMSTUDIO_BASE_URL", "https://sim.ai")
|
||||
api_key=os.getenv("SIM_API_KEY")
|
||||
base_url=os.getenv("SIM_BASE_URL", "https://sim.ai")
|
||||
)
|
||||
```
|
||||
|
||||
@@ -365,30 +724,30 @@ Configurez le client en utilisant des variables d'environnement :
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
# Production configuration with error handling
|
||||
api_key = os.getenv("SIMSTUDIO_API_KEY")
|
||||
api_key = os.getenv("SIM_API_KEY")
|
||||
if not api_key:
|
||||
raise ValueError("SIMSTUDIO_API_KEY environment variable is required")
|
||||
raise ValueError("SIM_API_KEY environment variable is required")
|
||||
|
||||
client = SimStudioClient(
|
||||
api_key=api_key,
|
||||
base_url=os.getenv("SIMSTUDIO_BASE_URL", "https://sim.ai")
|
||||
base_url=os.getenv("SIM_BASE_URL", "https://sim.ai")
|
||||
)
|
||||
```
|
||||
|
||||
</Tab>
|
||||
</Tabs>
|
||||
|
||||
## Obtenir votre clé API
|
||||
## Obtention de votre clé API
|
||||
|
||||
<Steps>
|
||||
<Step title="Connectez-vous à Sim">
|
||||
Accédez à [Sim](https://sim.ai) et connectez-vous à votre compte.
|
||||
</Step>
|
||||
<Step title="Ouvrez votre flux de travail">
|
||||
Naviguez vers le flux de travail que vous souhaitez exécuter par programmation.
|
||||
<Step title="Ouvrez votre workflow">
|
||||
Accédez au workflow que vous souhaitez exécuter par programmation.
|
||||
</Step>
|
||||
<Step title="Déployez votre flux de travail">
|
||||
Cliquez sur "Déployer" pour déployer votre flux de travail s'il n'a pas encore été déployé.
|
||||
<Step title="Déployez votre workflow">
|
||||
Cliquez sur "Déployer" pour déployer votre workflow s'il n'a pas encore été déployé.
|
||||
</Step>
|
||||
<Step title="Créez ou sélectionnez une clé API">
|
||||
Pendant le processus de déploiement, sélectionnez ou créez une clé API.
|
||||
|
||||
@@ -7,10 +7,10 @@ import { Card, Cards } from 'fumadocs-ui/components/card'
|
||||
import { Step, Steps } from 'fumadocs-ui/components/steps'
|
||||
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
|
||||
|
||||
Le SDK officiel TypeScript/JavaScript pour Sim offre une sécurité de type complète et prend en charge les environnements Node.js et navigateur, vous permettant d'exécuter des workflows de manière programmatique depuis vos applications Node.js, applications web et autres environnements JavaScript. Toutes les exécutions de workflow sont actuellement synchrones.
|
||||
Le SDK officiel TypeScript/JavaScript pour Sim offre une sécurité de type complète et prend en charge les environnements Node.js et navigateur, vous permettant d'exécuter des workflows par programmation depuis vos applications Node.js, applications web et autres environnements JavaScript.
|
||||
|
||||
<Callout type="info">
|
||||
Le SDK TypeScript offre une sécurité de type complète et prend en charge les environnements Node.js et navigateur. Toutes les exécutions de workflow sont actuellement synchrones.
|
||||
Le SDK TypeScript offre une sécurité de type complète, la prise en charge de l'exécution asynchrone, une limitation automatique du débit avec backoff exponentiel et le suivi d'utilisation.
|
||||
</Callout>
|
||||
|
||||
## Installation
|
||||
@@ -91,12 +91,17 @@ const result = await client.executeWorkflow('workflow-id', {
|
||||
```
|
||||
|
||||
**Paramètres :**
|
||||
- `workflowId` (string) : L'identifiant du workflow à exécuter
|
||||
- `options` (ExecutionOptions, facultatif) :
|
||||
- `workflowId` (string) : L'ID du workflow à exécuter
|
||||
- `options` (ExecutionOptions, optionnel) :
|
||||
- `input` (any) : Données d'entrée à transmettre au workflow
|
||||
- `timeout` (number) : Délai d'expiration en millisecondes (par défaut : 30000)
|
||||
- `stream` (boolean) : Activer les réponses en streaming (par défaut : false)
|
||||
- `selectedOutputs` (string[]) : Bloquer les sorties à diffuser au format `blockName.attribute` (par exemple, `["agent1.content"]`)
|
||||
- `async` (boolean) : Exécuter de manière asynchrone (par défaut : false)
|
||||
|
||||
**Retourne :** `Promise<WorkflowExecutionResult>`
|
||||
**Retourne :** `Promise<WorkflowExecutionResult | AsyncExecutionResult>`
|
||||
|
||||
Lorsque `async: true`, retourne immédiatement avec un ID de tâche pour l'interrogation. Sinon, attend la fin de l'exécution.
|
||||
|
||||
##### getWorkflowStatus()
|
||||
|
||||
@@ -108,7 +113,7 @@ console.log('Is deployed:', status.isDeployed);
|
||||
```
|
||||
|
||||
**Paramètres :**
|
||||
- `workflowId` (string) : L'identifiant du workflow
|
||||
- `workflowId` (string) : L'ID du workflow
|
||||
|
||||
**Retourne :** `Promise<WorkflowStatus>`
|
||||
|
||||
@@ -124,36 +129,125 @@ if (isReady) {
|
||||
```
|
||||
|
||||
**Paramètres :**
|
||||
- `workflowId` (string) : L'identifiant du workflow
|
||||
- `workflowId` (string) : L'ID du workflow
|
||||
|
||||
**Retourne :** `Promise<boolean>`
|
||||
|
||||
##### executeWorkflowSync()
|
||||
##### getJobStatus()
|
||||
|
||||
<Callout type="info">
|
||||
Actuellement, cette méthode est identique à `executeWorkflow()` puisque toutes les exécutions sont synchrones. Cette méthode est fournie pour une compatibilité future lorsque l'exécution asynchrone sera ajoutée.
|
||||
</Callout>
|
||||
|
||||
Exécuter un workflow (actuellement synchrone, identique à `executeWorkflow()`).
|
||||
Obtenir le statut d'une exécution de tâche asynchrone.
|
||||
|
||||
```typescript
|
||||
const result = await client.executeWorkflowSync('workflow-id', {
|
||||
input: { data: 'some input' },
|
||||
timeout: 60000
|
||||
const status = await client.getJobStatus('task-id-from-async-execution');
|
||||
console.log('Status:', status.status); // 'queued', 'processing', 'completed', 'failed'
|
||||
if (status.status === 'completed') {
|
||||
console.log('Output:', status.output);
|
||||
}
|
||||
```
|
||||
|
||||
**Paramètres :**
|
||||
- `taskId` (string) : L'ID de tâche retourné par l'exécution asynchrone
|
||||
|
||||
**Retourne :** `Promise<JobStatus>`
|
||||
|
||||
**Champs de réponse :**
|
||||
- `success` (boolean) : Indique si la requête a réussi
|
||||
- `taskId` (string) : L'ID de la tâche
|
||||
- `status` (string) : L'un des statuts suivants : `'queued'`, `'processing'`, `'completed'`, `'failed'`, `'cancelled'`
|
||||
- `metadata` (object) : Contient `startedAt`, `completedAt`, et `duration`
|
||||
- `output` (any, optionnel) : La sortie du workflow (une fois terminé)
|
||||
- `error` (any, optionnel) : Détails de l'erreur (en cas d'échec)
|
||||
- `estimatedDuration` (number, optionnel) : Durée estimée en millisecondes (lorsqu'en traitement/en file d'attente)
|
||||
|
||||
##### executeWithRetry()
|
||||
|
||||
Exécute un workflow avec une nouvelle tentative automatique en cas d'erreurs de limite de débit en utilisant un backoff exponentiel.
|
||||
|
||||
```typescript
|
||||
const result = await client.executeWithRetry('workflow-id', {
|
||||
input: { message: 'Hello' },
|
||||
timeout: 30000
|
||||
}, {
|
||||
maxRetries: 3, // Maximum number of retries
|
||||
initialDelay: 1000, // Initial delay in ms (1 second)
|
||||
maxDelay: 30000, // Maximum delay in ms (30 seconds)
|
||||
backoffMultiplier: 2 // Exponential backoff multiplier
|
||||
});
|
||||
```
|
||||
|
||||
**Paramètres :**
|
||||
- `workflowId` (string) : L'identifiant du workflow à exécuter
|
||||
- `options` (ExecutionOptions, facultatif) :
|
||||
- `input` (any) : Données d'entrée à transmettre au workflow
|
||||
- `timeout` (number) : Délai d'expiration pour la requête initiale en millisecondes
|
||||
- `options` (ExecutionOptions, facultatif) : Identique à `executeWorkflow()`
|
||||
- `retryOptions` (RetryOptions, facultatif) :
|
||||
- `maxRetries` (number) : Nombre maximum de tentatives (par défaut : 3)
|
||||
- `initialDelay` (number) : Délai initial en ms (par défaut : 1000)
|
||||
- `maxDelay` (number) : Délai maximum en ms (par défaut : 30000)
|
||||
- `backoffMultiplier` (number) : Multiplicateur de backoff (par défaut : 2)
|
||||
|
||||
**Retourne :** `Promise<WorkflowExecutionResult>`
|
||||
**Retourne :** `Promise<WorkflowExecutionResult | AsyncExecutionResult>`
|
||||
|
||||
La logique de nouvelle tentative utilise un backoff exponentiel (1s → 2s → 4s → 8s...) avec une variation aléatoire de ±25 % (jitter) afin d'éviter les pics de requêtes simultanées (« thundering herd »). Si l'API fournit un en-tête `retry-after`, celui-ci est utilisé à la place.
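
As a rough illustration of this schedule (a sketch, not the SDK's internal implementation), the per-attempt delay could be computed as follows; the helper name `retryDelayMs` and the inlined values (1000 ms initial delay, multiplier 2, 30000 ms cap) are assumptions taken from the documented defaults:

```typescript
// Sketch of an exponential backoff delay with +/- 25% jitter.
// Prefers a server-provided retry-after value (assumed to be in seconds) when present.
function retryDelayMs(attempt: number, retryAfterSeconds?: number): number {
  if (retryAfterSeconds !== undefined) {
    return retryAfterSeconds * 1000;
  }
  const base = Math.min(1000 * Math.pow(2, attempt), 30000); // 1s -> 2s -> 4s -> 8s ... capped at 30s
  const jitter = base * 0.25 * (Math.random() * 2 - 1);      // random variation of +/- 25%
  return Math.round(base + jitter);
}

// Example: approximate delays for the first four retries
for (let attempt = 0; attempt < 4; attempt++) {
  console.log(`Retry ${attempt + 1}: ~${retryDelayMs(attempt)} ms`);
}
```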
|
||||
|
||||
##### getRateLimitInfo()
|
||||
|
||||
Obtient les informations actuelles sur les limites de débit à partir de la dernière réponse de l'API.
|
||||
|
||||
```typescript
|
||||
const rateLimitInfo = client.getRateLimitInfo();
|
||||
if (rateLimitInfo) {
|
||||
console.log('Limit:', rateLimitInfo.limit);
|
||||
console.log('Remaining:', rateLimitInfo.remaining);
|
||||
console.log('Reset:', new Date(rateLimitInfo.reset * 1000));
|
||||
}
|
||||
```
|
||||
|
||||
**Retourne :** `RateLimitInfo | null`
|
||||
|
||||
##### getUsageLimits()
|
||||
|
||||
Obtient les limites d'utilisation actuelles et les informations de quota pour votre compte.
|
||||
|
||||
```typescript
|
||||
const limits = await client.getUsageLimits();
|
||||
console.log('Sync requests remaining:', limits.rateLimit.sync.remaining);
|
||||
console.log('Async requests remaining:', limits.rateLimit.async.remaining);
|
||||
console.log('Current period cost:', limits.usage.currentPeriodCost);
|
||||
console.log('Plan:', limits.usage.plan);
|
||||
```
|
||||
|
||||
**Retourne :** `Promise<UsageLimits>`
|
||||
|
||||
**Structure de la réponse :**
|
||||
|
||||
```typescript
|
||||
{
|
||||
success: boolean
|
||||
rateLimit: {
|
||||
sync: {
|
||||
isLimited: boolean
|
||||
limit: number
|
||||
remaining: number
|
||||
resetAt: string
|
||||
}
|
||||
async: {
|
||||
isLimited: boolean
|
||||
limit: number
|
||||
remaining: number
|
||||
resetAt: string
|
||||
}
|
||||
authType: string // 'api' or 'manual'
|
||||
}
|
||||
usage: {
|
||||
currentPeriodCost: number
|
||||
limit: number
|
||||
plan: string // e.g., 'free', 'pro'
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
##### setApiKey()
|
||||
|
||||
Mettre à jour la clé API.
|
||||
Met à jour la clé API.
|
||||
|
||||
```typescript
|
||||
client.setApiKey('new-api-key');
|
||||
@@ -161,7 +255,7 @@ client.setApiKey('new-api-key');
|
||||
|
||||
##### setBaseUrl()
|
||||
|
||||
Mettre à jour l'URL de base.
|
||||
Met à jour l'URL de base.
|
||||
|
||||
```typescript
|
||||
client.setBaseUrl('https://my-custom-domain.com');
|
||||
@@ -187,6 +281,20 @@ interface WorkflowExecutionResult {
|
||||
}
|
||||
```
|
||||
|
||||
### AsyncExecutionResult
|
||||
|
||||
```typescript
|
||||
interface AsyncExecutionResult {
|
||||
success: boolean;
|
||||
taskId: string;
|
||||
status: 'queued';
|
||||
createdAt: string;
|
||||
links: {
|
||||
status: string; // e.g., "/api/jobs/{taskId}"
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
### WorkflowStatus
|
||||
|
||||
```typescript
|
||||
@@ -198,6 +306,45 @@ interface WorkflowStatus {
|
||||
}
|
||||
```
|
||||
|
||||
### RateLimitInfo
|
||||
|
||||
```typescript
|
||||
interface RateLimitInfo {
|
||||
limit: number;
|
||||
remaining: number;
|
||||
reset: number;
|
||||
retryAfter?: number;
|
||||
}
|
||||
```
|
||||
|
||||
### UsageLimits
|
||||
|
||||
```typescript
|
||||
interface UsageLimits {
|
||||
success: boolean;
|
||||
rateLimit: {
|
||||
sync: {
|
||||
isLimited: boolean;
|
||||
limit: number;
|
||||
remaining: number;
|
||||
resetAt: string;
|
||||
};
|
||||
async: {
|
||||
isLimited: boolean;
|
||||
limit: number;
|
||||
remaining: number;
|
||||
resetAt: string;
|
||||
};
|
||||
authType: string;
|
||||
};
|
||||
usage: {
|
||||
currentPeriodCost: number;
|
||||
limit: number;
|
||||
plan: string;
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
### SimStudioError
|
||||
|
||||
```typescript
|
||||
@@ -207,9 +354,16 @@ class SimStudioError extends Error {
|
||||
}
|
||||
```
|
||||
|
||||
**Codes d'erreur courants :**
|
||||
- `UNAUTHORIZED` : Clé API invalide
|
||||
- `TIMEOUT` : Délai d'attente de la requête dépassé
|
||||
- `RATE_LIMIT_EXCEEDED` : Limite de débit dépassée
|
||||
- `USAGE_LIMIT_EXCEEDED` : Limite d'utilisation dépassée
|
||||
- `EXECUTION_ERROR` : Échec de l'exécution du workflow
|
||||
|
||||
## Exemples
|
||||
|
||||
### Exécution de workflow basique
|
||||
### Exécution basique d'un workflow
|
||||
|
||||
<Steps>
|
||||
<Step title="Initialiser le client">
|
||||
@@ -230,7 +384,7 @@ class SimStudioError extends Error {
|
||||
import { SimStudioClient } from 'simstudio-ts-sdk';
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.SIMSTUDIO_API_KEY!
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
async function runWorkflow() {
|
||||
@@ -271,7 +425,7 @@ Gérez différents types d'erreurs qui peuvent survenir pendant l'exécution du
|
||||
import { SimStudioClient, SimStudioError } from 'simstudio-ts-sdk';
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.SIMSTUDIO_API_KEY!
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
async function executeWithErrorHandling() {
|
||||
@@ -315,14 +469,14 @@ Configurez le client en utilisant des variables d'environnement :
|
||||
import { SimStudioClient } from 'simstudio-ts-sdk';
|
||||
|
||||
// Development configuration
|
||||
const apiKey = process.env.SIMSTUDIO_API_KEY;
|
||||
const apiKey = process.env.SIM_API_KEY;
|
||||
if (!apiKey) {
|
||||
throw new Error('SIMSTUDIO_API_KEY environment variable is required');
|
||||
throw new Error('SIM_API_KEY environment variable is required');
|
||||
}
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey,
|
||||
baseUrl: process.env.SIMSTUDIO_BASE_URL // optional
|
||||
baseUrl: process.env.SIM_BASE_URL // optional
|
||||
});
|
||||
```
|
||||
|
||||
@@ -333,14 +487,14 @@ Configurez le client en utilisant des variables d'environnement :
|
||||
import { SimStudioClient } from 'simstudio-ts-sdk';
|
||||
|
||||
// Production configuration with validation
|
||||
const apiKey = process.env.SIMSTUDIO_API_KEY;
|
||||
const apiKey = process.env.SIM_API_KEY;
|
||||
if (!apiKey) {
|
||||
throw new Error('SIMSTUDIO_API_KEY environment variable is required');
|
||||
throw new Error('SIM_API_KEY environment variable is required');
|
||||
}
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey,
|
||||
baseUrl: process.env.SIMSTUDIO_BASE_URL || 'https://sim.ai'
|
||||
baseUrl: process.env.SIM_BASE_URL || 'https://sim.ai'
|
||||
});
|
||||
```
|
||||
|
||||
@@ -357,7 +511,7 @@ import { SimStudioClient } from 'simstudio-ts-sdk';
|
||||
|
||||
const app = express();
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.SIMSTUDIO_API_KEY!
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
app.use(express.json());
|
||||
@@ -399,7 +553,7 @@ import { NextApiRequest, NextApiResponse } from 'next';
|
||||
import { SimStudioClient } from 'simstudio-ts-sdk';
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.SIMSTUDIO_API_KEY!
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
export default async function handler(
|
||||
@@ -469,14 +623,14 @@ document.getElementById('executeBtn')?.addEventListener('click', executeClientSi
|
||||
|
||||
### Exemple de hook React
|
||||
|
||||
Créez un hook React personnalisé pour l'exécution du workflow :
|
||||
Créer un hook React personnalisé pour l'exécution de workflow :
|
||||
|
||||
```typescript
|
||||
import { useState, useCallback } from 'react';
|
||||
import { SimStudioClient, WorkflowExecutionResult } from 'simstudio-ts-sdk';
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.NEXT_PUBLIC_SIMSTUDIO_API_KEY!
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
interface UseWorkflowResult {
|
||||
@@ -532,7 +686,7 @@ function WorkflowComponent() {
|
||||
<button onClick={handleExecute} disabled={loading}>
|
||||
{loading ? 'Executing...' : 'Execute Workflow'}
|
||||
</button>
|
||||
|
||||
|
||||
{error && <div>Error: {error.message}</div>}
|
||||
{result && (
|
||||
<div>
|
||||
@@ -545,38 +699,267 @@ function WorkflowComponent() {
|
||||
}
|
||||
```
|
||||
|
||||
## Obtenir votre clé API
|
||||
### Exécution asynchrone de workflow
|
||||
|
||||
Exécuter des workflows de manière asynchrone pour les tâches de longue durée :
|
||||
|
||||
```typescript
|
||||
import { SimStudioClient, AsyncExecutionResult } from 'simstudio-ts-sdk';
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
async function executeAsync() {
|
||||
try {
|
||||
// Start async execution
|
||||
const result = await client.executeWorkflow('workflow-id', {
|
||||
input: { data: 'large dataset' },
|
||||
async: true // Execute asynchronously
|
||||
});
|
||||
|
||||
// Check if result is an async execution
|
||||
if ('taskId' in result) {
|
||||
console.log('Task ID:', result.taskId);
|
||||
console.log('Status endpoint:', result.links.status);
|
||||
|
||||
// Poll for completion
|
||||
let status = await client.getJobStatus(result.taskId);
|
||||
|
||||
while (status.status === 'queued' || status.status === 'processing') {
|
||||
console.log('Current status:', status.status);
|
||||
await new Promise(resolve => setTimeout(resolve, 2000)); // Wait 2 seconds
|
||||
status = await client.getJobStatus(result.taskId);
|
||||
}
|
||||
|
||||
if (status.status === 'completed') {
|
||||
console.log('Workflow completed!');
|
||||
console.log('Output:', status.output);
|
||||
console.log('Duration:', status.metadata.duration);
|
||||
} else {
|
||||
console.error('Workflow failed:', status.error);
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
console.error('Error:', error);
|
||||
}
|
||||
}
|
||||
|
||||
executeAsync();
|
||||
```
|
||||
|
||||
### Limitation de débit et nouvelle tentative
|
||||
|
||||
Gérer automatiquement les limites de débit avec backoff exponentiel :
|
||||
|
||||
```typescript
|
||||
import { SimStudioClient, SimStudioError } from 'simstudio-ts-sdk';
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
async function executeWithRetryHandling() {
|
||||
try {
|
||||
// Automatically retries on rate limit
|
||||
const result = await client.executeWithRetry('workflow-id', {
|
||||
input: { message: 'Process this' }
|
||||
}, {
|
||||
maxRetries: 5,
|
||||
initialDelay: 1000,
|
||||
maxDelay: 60000,
|
||||
backoffMultiplier: 2
|
||||
});
|
||||
|
||||
console.log('Success:', result);
|
||||
} catch (error) {
|
||||
if (error instanceof SimStudioError && error.code === 'RATE_LIMIT_EXCEEDED') {
|
||||
console.error('Rate limit exceeded after all retries');
|
||||
|
||||
// Check rate limit info
|
||||
const rateLimitInfo = client.getRateLimitInfo();
|
||||
if (rateLimitInfo) {
|
||||
console.log('Rate limit resets at:', new Date(rateLimitInfo.reset * 1000));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Surveillance d'utilisation
|
||||
|
||||
Surveiller l'utilisation et les limites de votre compte :
|
||||
|
||||
```typescript
|
||||
import { SimStudioClient } from 'simstudio-ts-sdk';
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
async function checkUsage() {
|
||||
try {
|
||||
const limits = await client.getUsageLimits();
|
||||
|
||||
console.log('=== Rate Limits ===');
|
||||
console.log('Sync requests:');
|
||||
console.log(' Limit:', limits.rateLimit.sync.limit);
|
||||
console.log(' Remaining:', limits.rateLimit.sync.remaining);
|
||||
console.log(' Resets at:', limits.rateLimit.sync.resetAt);
|
||||
console.log(' Is limited:', limits.rateLimit.sync.isLimited);
|
||||
|
||||
console.log('\nAsync requests:');
|
||||
console.log(' Limit:', limits.rateLimit.async.limit);
|
||||
console.log(' Remaining:', limits.rateLimit.async.remaining);
|
||||
console.log(' Resets at:', limits.rateLimit.async.resetAt);
|
||||
console.log(' Is limited:', limits.rateLimit.async.isLimited);
|
||||
|
||||
console.log('\n=== Usage ===');
|
||||
    console.log('Current period cost:', `$${limits.usage.currentPeriodCost.toFixed(2)}`);
    console.log('Limit:', `$${limits.usage.limit.toFixed(2)}`);
    console.log('Plan:', limits.usage.plan);

    const percentUsed = (limits.usage.currentPeriodCost / limits.usage.limit) * 100;
    console.log(`Usage: ${percentUsed.toFixed(1)}%`);

    if (percentUsed > 80) {
      console.log('⚠️ Warning: You are approaching your usage limit!');
    }
  } catch (error) {
    console.error('Error checking usage:', error);
  }
}

checkUsage();
```
|
||||
|
||||
### Streaming Workflow Execution
|
||||
|
||||
Execute workflows with real-time streaming responses:
|
||||
|
||||
```typescript
|
||||
import { SimStudioClient } from 'simstudio-ts-sdk';
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
async function executeWithStreaming() {
|
||||
try {
|
||||
// Activer le streaming pour des sorties de blocs spécifiques
|
||||
const result = await client.executeWorkflow('workflow-id', {
|
||||
input: { message: "Compter jusqu'à cinq" },
|
||||
stream: true,
|
||||
selectedOutputs: ['agent1.content'] // Utiliser le format blockName.attribute
|
||||
});
|
||||
|
||||
console.log('Résultat du workflow :', result);
|
||||
} catch (error) {
|
||||
console.error('Erreur :', error);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
The streaming response follows the Server-Sent Events (SSE) format:
|
||||
|
||||
```
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":"One"}
|
||||
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", deux"}
|
||||
|
||||
data: {"event":"done","success":true,"output":{},"metadata":{"duration":610}}
|
||||
|
||||
data: [DONE]
|
||||
```
|
||||
|
||||
**React Streaming Example:**
|
||||
|
||||
```typescript
|
||||
import { useState, useEffect } from 'react';
|
||||
|
||||
function StreamingWorkflow() {
|
||||
const [output, setOutput] = useState('');
|
||||
const [loading, setLoading] = useState(false);
|
||||
|
||||
const executeStreaming = async () => {
|
||||
setLoading(true);
|
||||
setOutput('');
|
||||
|
||||
// IMPORTANT: Make this API call from your backend server, not the browser
|
||||
// Never expose your API key in client-side code
|
||||
const response = await fetch('https://sim.ai/api/workflows/WORKFLOW_ID/execute', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'X-API-Key': process.env.SIM_API_KEY! // Server-side environment variable only
|
||||
},
|
||||
body: JSON.stringify({
|
||||
message: 'Generate a story',
|
||||
stream: true,
|
||||
selectedOutputs: ['agent1.content']
|
||||
})
|
||||
});
|
||||
|
||||
const reader = response.body?.getReader();
|
||||
const decoder = new TextDecoder();
|
||||
|
||||
while (reader) {
|
||||
const { done, value } = await reader.read();
|
||||
if (done) break;
|
||||
|
||||
const chunk = decoder.decode(value);
|
||||
const lines = chunk.split('\n\n');
|
||||
|
||||
for (const line of lines) {
|
||||
if (line.startsWith('data: ')) {
|
||||
const data = line.slice(6);
|
||||
if (data === '[DONE]') {
|
||||
setLoading(false);
|
||||
break;
|
||||
}
|
||||
|
||||
try {
|
||||
const parsed = JSON.parse(data);
|
||||
if (parsed.chunk) {
|
||||
setOutput(prev => prev + parsed.chunk);
|
||||
} else if (parsed.event === 'done') {
|
||||
console.log('Execution complete:', parsed.metadata);
|
||||
}
|
||||
} catch (e) {
|
||||
// Skip invalid JSON
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
return (
|
||||
<div>
|
||||
<button onClick={executeStreaming} disabled={loading}>
|
||||
{loading ? 'Génération en cours...' : 'Démarrer le streaming'}
|
||||
</button>
|
||||
<div style={{ whiteSpace: 'pre-wrap' }}>{output}</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
```
|
||||
|
||||
## Getting Your API Key
|
||||
|
||||
<Steps>
|
||||
<Step title="Connectez-vous à Sim">
|
||||
Accédez à [Sim](https://sim.ai) et connectez-vous à votre compte.
|
||||
<Step title="Log in to Sim">
|
||||
Navigate to [Sim](https://sim.ai) and log in to your account.
|
||||
</Step>
|
||||
<Step title="Ouvrez votre workflow">
|
||||
Accédez au workflow que vous souhaitez exécuter par programmation.
|
||||
<Step title="Open your workflow">
|
||||
Navigate to the workflow you want to execute programmatically.
|
||||
</Step>
|
||||
<Step title="Déployez votre workflow">
|
||||
Cliquez sur « Déployer » pour déployer votre workflow s'il n'a pas encore été déployé.
|
||||
<Step title="Deploy your workflow">
|
||||
Click on "Deploy" to deploy your workflow if it hasn't been deployed yet.
|
||||
</Step>
|
||||
<Step title="Créez ou sélectionnez une clé API">
|
||||
Pendant le processus de déploiement, sélectionnez ou créez une clé API.
|
||||
<Step title="Create or select an API key">
|
||||
During the deployment process, select or create an API key.
|
||||
</Step>
|
||||
<Step title="Copiez la clé API">
|
||||
Copiez la clé API à utiliser dans votre application TypeScript/JavaScript.
|
||||
<Step title="Copy the API key">
|
||||
Copy the API key to use in your TypeScript/JavaScript application.
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
<Callout type="warning">
|
||||
Gardez votre clé API en sécurité et ne la soumettez jamais au contrôle de version. Utilisez des variables d'environnement ou une gestion de configuration sécurisée.
|
||||
Keep your API key secure and never commit it to version control. Use environment variables or secure configuration management.
|
||||
</Callout>
|
||||
|
||||
## Prérequis
|
||||
## Requirements
|
||||
|
||||
- Node.js 16+
|
||||
- TypeScript 5.0+ (pour les projets TypeScript)
|
||||
- TypeScript 5.0+ (for TypeScript projects)
|
||||
|
||||
## Support TypeScript
|
||||
## TypeScript Support
|
||||
|
||||
Le SDK est écrit en TypeScript et offre une sécurité de type complète :
|
||||
The SDK is written in TypeScript and provides full type safety:
|
||||
|
||||
```typescript
|
||||
import {
|
||||
@@ -588,13 +971,13 @@ import {
|
||||
|
||||
// Type-safe client initialization
|
||||
const client: SimStudioClient = new SimStudioClient({
|
||||
apiKey: process.env.SIMSTUDIO_API_KEY!
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
// Type-safe workflow execution
|
||||
const result: WorkflowExecutionResult = await client.executeWorkflow('workflow-id', {
|
||||
input: {
|
||||
message: 'Hello, TypeScript!'
|
||||
message: 'Bonjour, TypeScript !'
|
||||
}
|
||||
});
|
||||
|
||||
@@ -602,6 +985,7 @@ const result: WorkflowExecutionResult = await client.executeWorkflow('workflow-i
|
||||
const status: WorkflowStatus = await client.getWorkflowStatus('workflow-id');
|
||||
```
|
||||
|
||||
## Licence
|
||||
## License
|
||||
|
||||
Apache-2.0
|
||||
|
||||
Apache-2.0
|
||||
|
||||
@@ -38,6 +38,84 @@ curl -X POST \
|
||||
|
||||
Les réponses réussies renvoient le résultat d'exécution sérialisé de l'exécuteur. Les erreurs révèlent des problèmes de validation, d'authentification ou d'échec du workflow.
|
||||
|
||||
## Réponses en streaming
|
||||
|
||||
Activez le streaming en temps réel pour recevoir les résultats du workflow au fur et à mesure qu'ils sont générés, caractère par caractère. Cela est utile pour afficher progressivement les réponses de l'IA aux utilisateurs.
|
||||
|
||||
### Paramètres de requête
|
||||
|
||||
Ajoutez ces paramètres pour activer le streaming :
|
||||
|
||||
- `stream` - Définissez à `true` pour activer le streaming Server-Sent Events (SSE)
|
||||
- `selectedOutputs` - Tableau des sorties de blocs à diffuser en streaming (par exemple, `["agent1.content"]`)
|
||||
|
||||
### Format de sortie de bloc
|
||||
|
||||
Utilisez le format `blockName.attribute` pour spécifier quelles sorties de blocs diffuser en streaming :
|
||||
- Format : `"blockName.attribute"` (par exemple, si vous souhaitez diffuser en streaming le contenu du bloc Agent 1, vous utiliseriez `"agent1.content"`)
|
||||
- Les noms de blocs ne sont pas sensibles à la casse et les espaces sont ignorés
|
||||
|
||||
### Exemple de requête
|
||||
|
||||
```bash
|
||||
curl -X POST \
|
||||
https://sim.ai/api/workflows/WORKFLOW_ID/execute \
|
||||
-H 'Content-Type: application/json' \
|
||||
-H 'X-API-Key: YOUR_KEY' \
|
||||
-d '{
|
||||
"message": "Count to five",
|
||||
"stream": true,
|
||||
"selectedOutputs": ["agent1.content"]
|
||||
}'
|
||||
```
|
||||
|
||||
### Format de réponse
|
||||
|
||||
Les réponses en streaming utilisent le format Server-Sent Events (SSE) :
|
||||
|
||||
```
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":"One"}
|
||||
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", two"}
|
||||
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", three"}
|
||||
|
||||
data: {"event":"done","success":true,"output":{},"metadata":{"duration":610}}
|
||||
|
||||
data: [DONE]
|
||||
```
|
||||
|
||||
Chaque événement comprend :
|
||||
- **Fragments en streaming** : `{"blockId": "...", "chunk": "text"}` - Texte en temps réel au fur et à mesure qu'il est généré
|
||||
- **Événement final** : `{"event": "done", ...}` - Métadonnées d'exécution et résultats complets
|
||||
- **Terminateur** : `[DONE]` - Signale la fin du flux
|
||||
|
||||
### Streaming de plusieurs blocs
|
||||
|
||||
Lorsque `selectedOutputs` inclut plusieurs blocs, chaque fragment indique quel bloc l'a produit :
|
||||
|
||||
```bash
|
||||
curl -X POST \
|
||||
https://sim.ai/api/workflows/WORKFLOW_ID/execute \
|
||||
-H 'Content-Type: application/json' \
|
||||
-H 'X-API-Key: YOUR_KEY' \
|
||||
-d '{
|
||||
"message": "Process this request",
|
||||
"stream": true,
|
||||
"selectedOutputs": ["agent1.content", "agent2.content"]
|
||||
}'
|
||||
```
|
||||
|
||||
Le champ `blockId` dans chaque fragment vous permet d'acheminer la sortie vers l'élément d'interface utilisateur approprié :
|
||||
|
||||
```
|
||||
data: {"blockId":"agent1-uuid","chunk":"Processing..."}
|
||||
|
||||
data: {"blockId":"agent2-uuid","chunk":"Analyzing..."}
|
||||
|
||||
data: {"blockId":"agent1-uuid","chunk":" complete"}
|
||||
```
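
As an illustration of that routing, the sketch below accumulates the streamed text per `blockId` so that each agent's output can be rendered in its own UI element; the `handleSSEPayload` helper and the `outputByBlock` map are assumptions for the example, which processes payloads already stripped of their `data: ` prefix:

```typescript
// Accumulate streamed text per block so each agent's output can be routed
// to the right UI element.
const outputByBlock = new Map<string, string>();

function handleSSEPayload(payload: string): void {
  if (payload === '[DONE]') return; // end of stream
  const parsed = JSON.parse(payload);
  if (parsed.blockId && parsed.chunk) {
    const previous = outputByBlock.get(parsed.blockId) ?? '';
    outputByBlock.set(parsed.blockId, previous + parsed.chunk);
  } else if (parsed.event === 'done') {
    console.log('Execution metadata:', parsed.metadata);
  }
}

// Example using the chunks shown above
[
  '{"blockId":"agent1-uuid","chunk":"Processing..."}',
  '{"blockId":"agent2-uuid","chunk":"Analyzing..."}',
  '{"blockId":"agent1-uuid","chunk":" complete"}',
  '[DONE]',
].forEach(handleSSEPayload);

console.log(outputByBlock.get('agent1-uuid')); // "Processing... complete"
```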
|
||||
|
||||
## Référence des sorties
|
||||
|
||||
| Référence | Description |
|
||||
|
||||
@@ -10,7 +10,7 @@ import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
|
||||
Simの公式Python SDKを使用すると、Pythonアプリケーションからプログラムでワークフローを実行できます。
|
||||
|
||||
<Callout type="info">
|
||||
Python SDKはPython 3.8以上をサポートし、同期的なワークフロー実行を提供します。現在、すべてのワークフロー実行は同期的です。
|
||||
Python SDKはPython 3.8以上をサポートし、非同期実行、指数バックオフによる自動レート制限、使用状況追跡機能を提供します。
|
||||
</Callout>
|
||||
|
||||
## インストール
|
||||
@@ -70,12 +70,17 @@ result = client.execute_workflow(
|
||||
)
|
||||
```
|
||||
|
||||
**パラメータ:**
|
||||
**パラメータ:**
|
||||
- `workflow_id` (str): 実行するワークフローのID
|
||||
- `input_data` (dict, オプション): ワークフローに渡す入力データ
|
||||
- `timeout` (float, オプション): タイムアウト(秒)(デフォルト:30.0)
|
||||
- `timeout` (float, オプション): タイムアウト(秒)(デフォルト: 30.0)
|
||||
- `stream` (bool, オプション): ストリーミングレスポンスを有効にする(デフォルト: False)
|
||||
- `selected_outputs` (list[str], オプション): `blockName.attribute`形式でストリーミングするブロック出力(例: `["agent1.content"]`)
|
||||
- `async_execution` (bool, オプション): 非同期実行(デフォルト: False)
|
||||
|
||||
**戻り値:** `WorkflowExecutionResult`
|
||||
**戻り値:** `WorkflowExecutionResult | AsyncExecutionResult`
|
||||
|
||||
`async_execution=True`の場合、ポーリング用のタスクIDをすぐに返します。それ以外の場合は、完了を待ちます。
|
||||
|
||||
##### get_workflow_status()
|
||||
|
||||
@@ -86,7 +91,7 @@ status = client.get_workflow_status("workflow-id")
|
||||
print("Is deployed:", status.is_deployed)
|
||||
```
|
||||
|
||||
**パラメータ:**
|
||||
**パラメータ:**
|
||||
- `workflow_id` (str): ワークフローのID
|
||||
|
||||
**戻り値:** `WorkflowStatus`
|
||||
@@ -107,28 +112,118 @@ if is_ready:
|
||||
|
||||
**戻り値:** `bool`
|
||||
|
||||
##### execute_workflow_sync()
|
||||
##### get_job_status()
|
||||
|
||||
<Callout type="info">
|
||||
現在、このメソッドは `execute_workflow()` と同一です。すべての実行は同期的に行われるためです。このメソッドは、将来的に非同期実行が追加された際の互換性のために提供されています。
|
||||
</Callout>
|
||||
|
||||
ワークフローを実行します(現在は同期的、`execute_workflow()` と同じ)。
|
||||
非同期ジョブ実行のステータスを取得します。
|
||||
|
||||
```python
|
||||
result = client.execute_workflow_sync(
|
||||
status = client.get_job_status("task-id-from-async-execution")
|
||||
print("Status:", status["status"]) # 'queued', 'processing', 'completed', 'failed'
|
||||
if status["status"] == "completed":
|
||||
print("Output:", status["output"])
|
||||
```
|
||||
|
||||
**パラメータ:**
|
||||
- `task_id` (str): 非同期実行から返されたタスクID
|
||||
|
||||
**戻り値:** `Dict[str, Any]`
|
||||
|
||||
**レスポンスフィールド:**
|
||||
- `success` (bool): リクエストが成功したかどうか
|
||||
- `taskId` (str): タスクID
|
||||
- `status` (str): 次のいずれか: `'queued'`, `'processing'`, `'completed'`, `'failed'`, `'cancelled'`
|
||||
- `metadata` (dict): `startedAt`, `completedAt`, `duration`を含む
|
||||
- `output` (any, オプション): ワークフロー出力(完了時)
|
||||
- `error` (any, オプション): エラー詳細(失敗時)
|
||||
- `estimatedDuration` (int, オプション): 推定所要時間(ミリ秒)(処理中/キュー時)
|
||||
|
||||
##### execute_with_retry()
|
||||
|
||||
指数バックオフを使用してレート制限エラーで自動的に再試行するワークフロー実行。
|
||||
|
||||
```python
|
||||
result = client.execute_with_retry(
|
||||
"workflow-id",
|
||||
input_data={"data": "some input"},
|
||||
timeout=60.0
|
||||
input_data={"message": "Hello"},
|
||||
timeout=30.0,
|
||||
max_retries=3, # Maximum number of retries
|
||||
initial_delay=1.0, # Initial delay in seconds
|
||||
max_delay=30.0, # Maximum delay in seconds
|
||||
backoff_multiplier=2.0 # Exponential backoff multiplier
|
||||
)
|
||||
```
|
||||
|
||||
**パラメータ:**
|
||||
- `workflow_id` (str): 実行するワークフローのID
|
||||
- `input_data` (dict, optional): ワークフローに渡す入力データ
|
||||
- `timeout` (float): 初期リクエストのタイムアウト(秒)
|
||||
- `input_data` (dict, オプション): ワークフローに渡す入力データ
|
||||
- `timeout` (float, オプション): タイムアウト(秒)
|
||||
- `stream` (bool, オプション): ストリーミングレスポンスを有効にする
|
||||
- `selected_outputs` (list, オプション): ストリーミングするブロック出力
|
||||
- `async_execution` (bool, オプション): 非同期実行
|
||||
- `max_retries` (int, オプション): 最大再試行回数(デフォルト: 3)
|
||||
- `initial_delay` (float, オプション): 初期遅延(秒)(デフォルト: 1.0)
|
||||
- `max_delay` (float, オプション): 最大遅延(秒)(デフォルト: 30.0)
|
||||
- `backoff_multiplier` (float, オプション): バックオフ乗数(デフォルト: 2.0)
|
||||
|
||||
**戻り値:** `WorkflowExecutionResult`
|
||||
**戻り値:** `WorkflowExecutionResult | AsyncExecutionResult`
|
||||
|
||||
リトライロジックは、サンダリングハード問題を防ぐために±25%のジッターを伴う指数バックオフ(1秒→2秒→4秒→8秒...)を使用します。APIが `retry-after` ヘッダーを提供する場合、代わりにそれが使用されます。
|
||||
|
||||
##### get_rate_limit_info()
|
||||
|
||||
最後のAPIレスポンスから現在のレート制限情報を取得します。
|
||||
|
||||
```python
|
||||
rate_limit_info = client.get_rate_limit_info()
|
||||
if rate_limit_info:
|
||||
print("Limit:", rate_limit_info.limit)
|
||||
print("Remaining:", rate_limit_info.remaining)
|
||||
print("Reset:", datetime.fromtimestamp(rate_limit_info.reset))
|
||||
```
|
||||
|
||||
**戻り値:** `RateLimitInfo | None`
|
||||
|
||||
##### get_usage_limits()
|
||||
|
||||
アカウントの現在の使用制限とクォータ情報を取得します。
|
||||
|
||||
```python
|
||||
limits = client.get_usage_limits()
|
||||
print("Sync requests remaining:", limits.rate_limit["sync"]["remaining"])
|
||||
print("Async requests remaining:", limits.rate_limit["async"]["remaining"])
|
||||
print("Current period cost:", limits.usage["currentPeriodCost"])
|
||||
print("Plan:", limits.usage["plan"])
|
||||
```
|
||||
|
||||
**戻り値:** `UsageLimits`
|
||||
|
||||
**レスポンス構造:**
|
||||
|
||||
```python
|
||||
{
|
||||
"success": bool,
|
||||
"rateLimit": {
|
||||
"sync": {
|
||||
"isLimited": bool,
|
||||
"limit": int,
|
||||
"remaining": int,
|
||||
"resetAt": str
|
||||
},
|
||||
"async": {
|
||||
"isLimited": bool,
|
||||
"limit": int,
|
||||
"remaining": int,
|
||||
"resetAt": str
|
||||
},
|
||||
"authType": str # 'api' or 'manual'
|
||||
},
|
||||
"usage": {
|
||||
"currentPeriodCost": float,
|
||||
"limit": float,
|
||||
"plan": str # e.g., 'free', 'pro'
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
##### set_api_key()
|
||||
|
||||
@@ -170,6 +265,18 @@ class WorkflowExecutionResult:
|
||||
total_duration: Optional[float] = None
|
||||
```
|
||||
|
||||
### AsyncExecutionResult
|
||||
|
||||
```python
|
||||
@dataclass
|
||||
class AsyncExecutionResult:
|
||||
success: bool
|
||||
task_id: str
|
||||
status: str # 'queued'
|
||||
created_at: str
|
||||
links: Dict[str, str] # e.g., {"status": "/api/jobs/{taskId}"}
|
||||
```
|
||||
|
||||
### WorkflowStatus
|
||||
|
||||
```python
|
||||
@@ -181,6 +288,27 @@ class WorkflowStatus:
|
||||
needs_redeployment: bool = False
|
||||
```
|
||||
|
||||
### RateLimitInfo
|
||||
|
||||
```python
|
||||
@dataclass
|
||||
class RateLimitInfo:
|
||||
limit: int
|
||||
remaining: int
|
||||
reset: int
|
||||
retry_after: Optional[int] = None
|
||||
```
|
||||
|
||||
### UsageLimits
|
||||
|
||||
```python
|
||||
@dataclass
|
||||
class UsageLimits:
|
||||
success: bool
|
||||
rate_limit: Dict[str, Any]
|
||||
usage: Dict[str, Any]
|
||||
```
|
||||
|
||||
### SimStudioError
|
||||
|
||||
```python
|
||||
@@ -191,6 +319,13 @@ class SimStudioError(Exception):
|
||||
self.status = status
|
||||
```
|
||||
|
||||
**一般的なエラーコード:**
|
||||
- `UNAUTHORIZED`: 無効なAPIキー
|
||||
- `TIMEOUT`: リクエストがタイムアウトしました
|
||||
- `RATE_LIMIT_EXCEEDED`: レート制限を超えました
|
||||
- `USAGE_LIMIT_EXCEEDED`: 使用制限を超えました
|
||||
- `EXECUTION_ERROR`: ワークフローの実行に失敗しました
|
||||
|
||||
## 例
|
||||
|
||||
### 基本的なワークフロー実行
|
||||
@@ -214,7 +349,7 @@ class SimStudioError(Exception):
|
||||
import os
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def run_workflow():
|
||||
try:
|
||||
@@ -252,7 +387,7 @@ run_workflow()
|
||||
from simstudio import SimStudioClient, SimStudioError
|
||||
import os
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_with_error_handling():
|
||||
try:
|
||||
@@ -277,14 +412,14 @@ def execute_with_error_handling():
|
||||
|
||||
### コンテキストマネージャーの使用
|
||||
|
||||
リソースのクリーンアップを自動的に処理するためにコンテキストマネージャーとしてクライアントを使用します:
|
||||
リソースのクリーンアップを自動的に処理するためにクライアントをコンテキストマネージャーとして使用します:
|
||||
|
||||
```python
|
||||
from simstudio import SimStudioClient
|
||||
import os
|
||||
|
||||
# Using context manager to automatically close the session
|
||||
with SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY")) as client:
|
||||
with SimStudioClient(api_key=os.getenv("SIM_API_KEY")) as client:
|
||||
result = client.execute_workflow("workflow-id")
|
||||
print("Result:", result)
|
||||
# Session is automatically closed here
|
||||
@@ -298,7 +433,7 @@ with SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY")) as client:
|
||||
from simstudio import SimStudioClient
|
||||
import os
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_workflows_batch(workflow_data_pairs):
|
||||
"""Execute multiple workflows with different input data."""
|
||||
@@ -339,6 +474,230 @@ for result in results:
|
||||
print(f"Workflow {result['workflow_id']}: {'Success' if result['success'] else 'Failed'}")
|
||||
```
|
||||
|
||||
### 非同期ワークフロー実行
|
||||
|
||||
長時間実行されるタスクのためにワークフローを非同期で実行します:
|
||||
|
||||
```python
|
||||
import os
|
||||
import time
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_async():
|
||||
try:
|
||||
# Start async execution
|
||||
result = client.execute_workflow(
|
||||
"workflow-id",
|
||||
input_data={"data": "large dataset"},
|
||||
async_execution=True # Execute asynchronously
|
||||
)
|
||||
|
||||
# Check if result is an async execution
|
||||
if hasattr(result, 'task_id'):
|
||||
print(f"Task ID: {result.task_id}")
|
||||
print(f"Status endpoint: {result.links['status']}")
|
||||
|
||||
# Poll for completion
|
||||
status = client.get_job_status(result.task_id)
|
||||
|
||||
while status["status"] in ["queued", "processing"]:
|
||||
print(f"Current status: {status['status']}")
|
||||
time.sleep(2) # Wait 2 seconds
|
||||
status = client.get_job_status(result.task_id)
|
||||
|
||||
if status["status"] == "completed":
|
||||
print("Workflow completed!")
|
||||
print(f"Output: {status['output']}")
|
||||
print(f"Duration: {status['metadata']['duration']}")
|
||||
else:
|
||||
print(f"Workflow failed: {status['error']}")
|
||||
|
||||
except Exception as error:
|
||||
print(f"Error: {error}")
|
||||
|
||||
execute_async()
|
||||
```
|
||||
|
||||
### レート制限とリトライ
|
||||
|
||||
指数バックオフを使用して自動的にレート制限を処理します:
|
||||
|
||||
```python
|
||||
import os
|
||||
from simstudio import SimStudioClient, SimStudioError
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_with_retry_handling():
|
||||
try:
|
||||
# Automatically retries on rate limit
|
||||
result = client.execute_with_retry(
|
||||
"workflow-id",
|
||||
input_data={"message": "Process this"},
|
||||
max_retries=5,
|
||||
initial_delay=1.0,
|
||||
max_delay=60.0,
|
||||
backoff_multiplier=2.0
|
||||
)
|
||||
|
||||
print(f"Success: {result}")
|
||||
except SimStudioError as error:
|
||||
if error.code == "RATE_LIMIT_EXCEEDED":
|
||||
print("Rate limit exceeded after all retries")
|
||||
|
||||
# Check rate limit info
|
||||
rate_limit_info = client.get_rate_limit_info()
|
||||
if rate_limit_info:
|
||||
from datetime import datetime
|
||||
reset_time = datetime.fromtimestamp(rate_limit_info.reset)
|
||||
print(f"Rate limit resets at: {reset_time}")
|
||||
|
||||
execute_with_retry_handling()
|
||||
```
|
||||
|
||||
### 使用状況モニタリング
|
||||
|
||||
アカウントの使用状況と制限をモニタリングします:
|
||||
|
||||
```python
|
||||
import os
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def check_usage():
|
||||
try:
|
||||
limits = client.get_usage_limits()
|
||||
|
||||
print("=== Rate Limits ===")
|
||||
print("Sync requests:")
|
||||
print(f" Limit: {limits.rate_limit['sync']['limit']}")
|
||||
print(f" Remaining: {limits.rate_limit['sync']['remaining']}")
|
||||
print(f" Resets at: {limits.rate_limit['sync']['resetAt']}")
|
||||
print(f" Is limited: {limits.rate_limit['sync']['isLimited']}")
|
||||
|
||||
print("\nAsync requests:")
|
||||
print(f" Limit: {limits.rate_limit['async']['limit']}")
|
||||
print(f" Remaining: {limits.rate_limit['async']['remaining']}")
|
||||
print(f" Resets at: {limits.rate_limit['async']['resetAt']}")
|
||||
print(f" Is limited: {limits.rate_limit['async']['isLimited']}")
|
||||
|
||||
print("\n=== Usage ===")
|
||||
print(f"Current period cost: ${limits.usage['currentPeriodCost']:.2f}")
|
||||
print(f"Limit: ${limits.usage['limit']:.2f}")
|
||||
print(f"Plan: {limits.usage['plan']}")
|
||||
|
||||
percent_used = (limits.usage['currentPeriodCost'] / limits.usage['limit']) * 100
|
||||
print(f"Usage: {percent_used:.1f}%")
|
||||
|
||||
if percent_used > 80:
|
||||
print("⚠️ Warning: You are approaching your usage limit!")
|
||||
|
||||
except Exception as error:
|
||||
print(f"Error checking usage: {error}")
|
||||
|
||||
check_usage()
|
||||
```
|
||||
|
||||
### ワークフローの実行ストリーミング
|
||||
|
||||
リアルタイムのストリーミングレスポンスでワークフローを実行します:
|
||||
|
||||
```python
|
||||
from simstudio import SimStudioClient
|
||||
import os
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_with_streaming():
|
||||
"""Execute workflow with streaming enabled."""
|
||||
try:
|
||||
# Enable streaming for specific block outputs
|
||||
result = client.execute_workflow(
|
||||
"workflow-id",
|
||||
input_data={"message": "Count to five"},
|
||||
stream=True,
|
||||
selected_outputs=["agent1.content"] # Use blockName.attribute format
|
||||
)
|
||||
|
||||
print("Workflow result:", result)
|
||||
except Exception as error:
|
||||
print("Error:", error)
|
||||
|
||||
execute_with_streaming()
|
||||
```
|
||||
|
||||
ストリーミングレスポンスはServer-Sent Events(SSE)形式に従います:
|
||||
|
||||
```
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":"One"}
|
||||
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", two"}
|
||||
|
||||
data: {"event":"done","success":true,"output":{},"metadata":{"duration":610}}
|
||||
|
||||
data: [DONE]
|
||||
```
|
||||
|
||||
**Flaskストリーミングの例:**
|
||||
|
||||
```python
|
||||
from flask import Flask, Response, stream_with_context
|
||||
import requests
|
||||
import json
|
||||
import os
|
||||
|
||||
app = Flask(__name__)
|
||||
|
||||
@app.route('/stream-workflow')
|
||||
def stream_workflow():
|
||||
"""Stream workflow execution to the client."""
|
||||
|
||||
def generate():
|
||||
response = requests.post(
|
||||
'https://sim.ai/api/workflows/WORKFLOW_ID/execute',
|
||||
headers={
|
||||
'Content-Type': 'application/json',
|
||||
'X-API-Key': os.getenv('SIM_API_KEY')
|
||||
},
|
||||
json={
|
||||
'message': 'Generate a story',
|
||||
'stream': True,
|
||||
'selectedOutputs': ['agent1.content']
|
||||
},
|
||||
stream=True
|
||||
)
|
||||
|
||||
for line in response.iter_lines():
|
||||
if line:
|
||||
decoded_line = line.decode('utf-8')
|
||||
if decoded_line.startswith('data: '):
|
||||
data = decoded_line[6:] # Remove 'data: ' prefix
|
||||
|
||||
if data == '[DONE]':
|
||||
break
|
||||
|
||||
try:
|
||||
parsed = json.loads(data)
|
||||
if 'chunk' in parsed:
|
||||
yield f"data: {json.dumps(parsed)}\n\n"
|
||||
elif parsed.get('event') == 'done':
|
||||
yield f"data: {json.dumps(parsed)}\n\n"
|
||||
print("Execution complete:", parsed.get('metadata'))
|
||||
except json.JSONDecodeError:
|
||||
pass
|
||||
|
||||
return Response(
|
||||
stream_with_context(generate()),
|
||||
mimetype='text/event-stream'
|
||||
)
|
||||
|
||||
if __name__ == '__main__':
|
||||
app.run(debug=True)
|
||||
```
|
||||
|
||||
### 環境設定
|
||||
|
||||
環境変数を使用してクライアントを設定します:
|
||||
@@ -352,8 +711,8 @@ for result in results:
|
||||
|
||||
# Development configuration
|
||||
client = SimStudioClient(
|
||||
api_key=os.getenv("SIMSTUDIO_API_KEY"),
|
||||
base_url=os.getenv("SIMSTUDIO_BASE_URL", "https://sim.ai")
|
||||
api_key=os.getenv("SIM_API_KEY")
|
||||
base_url=os.getenv("SIM_BASE_URL", "https://sim.ai")
|
||||
)
|
||||
```
|
||||
|
||||
@@ -365,13 +724,13 @@ for result in results:
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
# Production configuration with error handling
|
||||
api_key = os.getenv("SIMSTUDIO_API_KEY")
|
||||
api_key = os.getenv("SIM_API_KEY")
|
||||
if not api_key:
|
||||
raise ValueError("SIMSTUDIO_API_KEY environment variable is required")
|
||||
raise ValueError("SIM_API_KEY environment variable is required")
|
||||
|
||||
client = SimStudioClient(
|
||||
api_key=api_key,
|
||||
base_url=os.getenv("SIMSTUDIO_BASE_URL", "https://sim.ai")
|
||||
base_url=os.getenv("SIM_BASE_URL", "https://sim.ai")
|
||||
)
|
||||
```
|
||||
|
||||
|
||||
@@ -7,10 +7,10 @@ import { Card, Cards } from 'fumadocs-ui/components/card'
|
||||
import { Step, Steps } from 'fumadocs-ui/components/steps'
|
||||
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
|
||||
|
||||
公式TypeScript/JavaScript SDKはSimのために完全な型安全性を提供し、Node.jsとブラウザ環境の両方をサポートしています。これにより、Node.jsアプリケーション、Webアプリケーション、その他のJavaScript環境からプログラムでワークフローを実行することができます。現在、すべてのワークフロー実行は同期的に行われます。
|
||||
Sim用の公式TypeScript/JavaScript SDKは、完全な型安全性を提供し、Node.jsとブラウザ環境の両方をサポートしています。これにより、Node.jsアプリケーション、Webアプリケーション、その他のJavaScript環境からプログラムによってワークフローを実行することができます。
|
||||
|
||||
<Callout type="info">
|
||||
TypeScript SDKは完全な型安全性を提供し、Node.jsとブラウザ環境の両方をサポートしています。現在、すべてのワークフロー実行は同期的に行われます。
|
||||
TypeScript SDKは、完全な型安全性、非同期実行サポート、指数バックオフによる自動レート制限、使用状況追跡を提供します。
|
||||
</Callout>
|
||||
|
||||
## インストール
|
||||
@@ -95,8 +95,13 @@ const result = await client.executeWorkflow('workflow-id', {
|
||||
- `options` (ExecutionOptions, オプション):
|
||||
- `input` (any): ワークフローに渡す入力データ
|
||||
- `timeout` (number): タイムアウト(ミリ秒)(デフォルト: 30000)
|
||||
- `stream` (boolean): ストリーミングレスポンスを有効にする(デフォルト: false)
|
||||
- `selectedOutputs` (string[]): `blockName.attribute`形式でストリーミングするブロック出力(例: `["agent1.content"]`)
|
||||
- `async` (boolean): 非同期実行(デフォルト: false)
|
||||
|
||||
**戻り値:** `Promise<WorkflowExecutionResult>`
|
||||
**戻り値:** `Promise<WorkflowExecutionResult | AsyncExecutionResult>`
|
||||
|
||||
`async: true`の場合、ポーリング用のタスクIDをすぐに返します。それ以外の場合は、完了を待ちます。
|
||||
|
||||
##### getWorkflowStatus()
|
||||
|
||||
@@ -114,7 +119,7 @@ console.log('Is deployed:', status.isDeployed);
|
||||
|
||||
##### validateWorkflow()
|
||||
|
||||
ワークフローが実行準備ができているか検証します。
|
||||
ワークフローが実行準備ができているかを検証します。
|
||||
|
||||
```typescript
|
||||
const isReady = await client.validateWorkflow('workflow-id');
|
||||
@@ -128,28 +133,117 @@ if (isReady) {
|
||||
|
||||
**戻り値:** `Promise<boolean>`
|
||||
|
||||
##### executeWorkflowSync()
|
||||
##### getJobStatus()
|
||||
|
||||
<Callout type="info">
|
||||
現在、このメソッドは `executeWorkflow()` と同一です。すべての実行は同期的に行われるためです。このメソッドは、将来的に非同期実行が追加された際の互換性のために提供されています。
|
||||
</Callout>
|
||||
|
||||
ワークフローを実行します(現在は同期的、`executeWorkflow()` と同じ)。
|
||||
非同期ジョブ実行のステータスを取得します。
|
||||
|
||||
```typescript
|
||||
const result = await client.executeWorkflowSync('workflow-id', {
|
||||
input: { data: 'some input' },
|
||||
timeout: 60000
|
||||
const status = await client.getJobStatus('task-id-from-async-execution');
|
||||
console.log('Status:', status.status); // 'queued', 'processing', 'completed', 'failed'
|
||||
if (status.status === 'completed') {
|
||||
console.log('Output:', status.output);
|
||||
}
|
||||
```
|
||||
|
||||
**パラメータ:**
|
||||
- `taskId` (string): 非同期実行から返されたタスクID
|
||||
|
||||
**戻り値:** `Promise<JobStatus>`
|
||||
|
||||
**レスポンスフィールド:**
|
||||
- `success` (boolean): リクエストが成功したかどうか
|
||||
- `taskId` (string): タスクID
|
||||
- `status` (string): 次のいずれか `'queued'`, `'processing'`, `'completed'`, `'failed'`, `'cancelled'`
|
||||
- `metadata` (object): `startedAt`, `completedAt`, および `duration` を含む
|
||||
- `output` (any, オプション): ワークフロー出力(完了時)
|
||||
- `error` (any, オプション): エラー詳細(失敗時)
|
||||
- `estimatedDuration` (number, オプション): 推定所要時間(ミリ秒)(処理中/キュー時)
|
||||
|
||||
##### executeWithRetry()
|
||||
|
||||
レート制限エラー時に指数バックオフを使用して自動的に再試行するワークフロー実行。
|
||||
|
||||
```typescript
|
||||
const result = await client.executeWithRetry('workflow-id', {
|
||||
input: { message: 'Hello' },
|
||||
timeout: 30000
|
||||
}, {
|
||||
maxRetries: 3, // Maximum number of retries
|
||||
initialDelay: 1000, // Initial delay in ms (1 second)
|
||||
maxDelay: 30000, // Maximum delay in ms (30 seconds)
|
||||
backoffMultiplier: 2 // Exponential backoff multiplier
|
||||
});
|
||||
```
|
||||
|
||||
**パラメータ:**
|
||||
- `workflowId` (string): 実行するワークフローのID
|
||||
- `options` (ExecutionOptions, オプション):
|
||||
- `input` (any): ワークフローに渡す入力データ
|
||||
- `timeout` (number): 初期リクエストのタイムアウト(ミリ秒)
|
||||
- `options` (ExecutionOptions, オプション): `executeWorkflow()`と同じ
|
||||
- `retryOptions` (RetryOptions, オプション):
|
||||
- `maxRetries` (number): 最大再試行回数(デフォルト: 3)
|
||||
- `initialDelay` (number): 初期遅延(ミリ秒)(デフォルト: 1000)
|
||||
- `maxDelay` (number): 最大遅延(ミリ秒)(デフォルト: 30000)
|
||||
- `backoffMultiplier` (number): バックオフ乗数(デフォルト: 2)
|
||||
|
||||
**戻り値:** `Promise<WorkflowExecutionResult>`
|
||||
**戻り値:** `Promise<WorkflowExecutionResult | AsyncExecutionResult>`
|
||||
|
||||
再試行ロジックは、サンダリングハード問題を防ぐために±25%のジッターを含む指数バックオフ(1秒→2秒→4秒→8秒...)を使用します。APIが`retry-after`ヘッダーを提供する場合、代わりにそれが使用されます。
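
As a rough sketch of this behaviour done by hand (not the SDK's internal code), a caller that catches the rate limit error itself could prefer the server-suggested `retryAfter` value over its own fixed delay; the helper below is illustrative only and assumes `retryAfter` is expressed in seconds:

```typescript
import { SimStudioClient, SimStudioError } from 'simstudio-ts-sdk';

const client = new SimStudioClient({ apiKey: process.env.SIM_API_KEY! });

// Illustrative manual retry: wait for the server-suggested retry-after when
// available, otherwise fall back to a one-second delay, then retry once.
async function executeOnceWithManualRetry(workflowId: string) {
  try {
    return await client.executeWorkflow(workflowId, { input: { message: 'Hello' } });
  } catch (error) {
    if (error instanceof SimStudioError && error.code === 'RATE_LIMIT_EXCEEDED') {
      const info = client.getRateLimitInfo();
      const waitMs = info?.retryAfter ? info.retryAfter * 1000 : 1000;
      await new Promise((resolve) => setTimeout(resolve, waitMs));
      return await client.executeWorkflow(workflowId, { input: { message: 'Hello' } });
    }
    throw error;
  }
}
```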
|
||||
|
||||
##### getRateLimitInfo()
|
||||
|
||||
最後のAPIレスポンスから現在のレート制限情報を取得します。
|
||||
|
||||
```typescript
|
||||
const rateLimitInfo = client.getRateLimitInfo();
|
||||
if (rateLimitInfo) {
|
||||
console.log('Limit:', rateLimitInfo.limit);
|
||||
console.log('Remaining:', rateLimitInfo.remaining);
|
||||
console.log('Reset:', new Date(rateLimitInfo.reset * 1000));
|
||||
}
|
||||
```
|
||||
|
||||
**戻り値:** `RateLimitInfo | null`
|
||||
|
||||
##### getUsageLimits()
|
||||
|
||||
アカウントの現在の使用制限とクォータ情報を取得します。
|
||||
|
||||
```typescript
|
||||
const limits = await client.getUsageLimits();
|
||||
console.log('Sync requests remaining:', limits.rateLimit.sync.remaining);
|
||||
console.log('Async requests remaining:', limits.rateLimit.async.remaining);
|
||||
console.log('Current period cost:', limits.usage.currentPeriodCost);
|
||||
console.log('Plan:', limits.usage.plan);
|
||||
```
|
||||
|
||||
**戻り値:** `Promise<UsageLimits>`
|
||||
|
||||
**レスポンス構造:**
|
||||
|
||||
```typescript
|
||||
{
|
||||
success: boolean
|
||||
rateLimit: {
|
||||
sync: {
|
||||
isLimited: boolean
|
||||
limit: number
|
||||
remaining: number
|
||||
resetAt: string
|
||||
}
|
||||
async: {
|
||||
isLimited: boolean
|
||||
limit: number
|
||||
remaining: number
|
||||
resetAt: string
|
||||
}
|
||||
authType: string // 'api' or 'manual'
|
||||
}
|
||||
usage: {
|
||||
currentPeriodCost: number
|
||||
limit: number
|
||||
plan: string // e.g., 'free', 'pro'
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
##### setApiKey()
|
||||
|
||||
@@ -167,7 +261,7 @@ client.setApiKey('new-api-key');
|
||||
client.setBaseUrl('https://my-custom-domain.com');
|
||||
```
|
||||
|
||||
## 型
|
||||
## 型定義
|
||||
|
||||
### WorkflowExecutionResult
|
||||
|
||||
@@ -187,6 +281,20 @@ interface WorkflowExecutionResult {
|
||||
}
|
||||
```
|
||||
|
||||
### AsyncExecutionResult
|
||||
|
||||
```typescript
|
||||
interface AsyncExecutionResult {
|
||||
success: boolean;
|
||||
taskId: string;
|
||||
status: 'queued';
|
||||
createdAt: string;
|
||||
links: {
|
||||
status: string; // e.g., "/api/jobs/{taskId}"
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
### WorkflowStatus
|
||||
|
||||
```typescript
|
||||
@@ -198,6 +306,45 @@ interface WorkflowStatus {
|
||||
}
|
||||
```
|
||||
|
||||
### RateLimitInfo
|
||||
|
||||
```typescript
|
||||
interface RateLimitInfo {
|
||||
limit: number;
|
||||
remaining: number;
|
||||
reset: number;
|
||||
retryAfter?: number;
|
||||
}
|
||||
```
|
||||
|
||||
### UsageLimits
|
||||
|
||||
```typescript
|
||||
interface UsageLimits {
|
||||
success: boolean;
|
||||
rateLimit: {
|
||||
sync: {
|
||||
isLimited: boolean;
|
||||
limit: number;
|
||||
remaining: number;
|
||||
resetAt: string;
|
||||
};
|
||||
async: {
|
||||
isLimited: boolean;
|
||||
limit: number;
|
||||
remaining: number;
|
||||
resetAt: string;
|
||||
};
|
||||
authType: string;
|
||||
};
|
||||
usage: {
|
||||
currentPeriodCost: number;
|
||||
limit: number;
|
||||
plan: string;
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
### SimStudioError
|
||||
|
||||
```typescript
|
||||
@@ -207,9 +354,16 @@ class SimStudioError extends Error {
|
||||
}
|
||||
```
|
||||
|
||||
**一般的なエラーコード:**
|
||||
- `UNAUTHORIZED`: 無効なAPIキー
|
||||
- `TIMEOUT`: リクエストがタイムアウトしました
|
||||
- `RATE_LIMIT_EXCEEDED`: レート制限を超えました
|
||||
- `USAGE_LIMIT_EXCEEDED`: 使用制限を超えました
|
||||
- `EXECUTION_ERROR`: ワークフローの実行に失敗しました
|
||||
|
||||
## 例
|
||||
|
||||
### 基本的なワークフローの実行
|
||||
### 基本的なワークフロー実行
|
||||
|
||||
<Steps>
|
||||
<Step title="クライアントの初期化">
|
||||
@@ -230,7 +384,7 @@ class SimStudioError extends Error {
|
||||
import { SimStudioClient } from 'simstudio-ts-sdk';
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.SIMSTUDIO_API_KEY!
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
async function runWorkflow() {
|
||||
@@ -271,7 +425,7 @@ runWorkflow();
|
||||
import { SimStudioClient, SimStudioError } from 'simstudio-ts-sdk';
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.SIMSTUDIO_API_KEY!
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
async function executeWithErrorHandling() {
|
||||
@@ -315,14 +469,14 @@ async function executeWithErrorHandling() {
|
||||
import { SimStudioClient } from 'simstudio-ts-sdk';
|
||||
|
||||
// Development configuration
|
||||
const apiKey = process.env.SIMSTUDIO_API_KEY;
|
||||
const apiKey = process.env.SIM_API_KEY;
|
||||
if (!apiKey) {
|
||||
throw new Error('SIMSTUDIO_API_KEY environment variable is required');
|
||||
throw new Error('SIM_API_KEY environment variable is required');
|
||||
}
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey,
|
||||
baseUrl: process.env.SIMSTUDIO_BASE_URL // optional
|
||||
baseUrl: process.env.SIM_BASE_URL // optional
|
||||
});
|
||||
```
|
||||
|
||||
@@ -333,21 +487,21 @@ async function executeWithErrorHandling() {
|
||||
import { SimStudioClient } from 'simstudio-ts-sdk';
|
||||
|
||||
// Production configuration with validation
|
||||
const apiKey = process.env.SIMSTUDIO_API_KEY;
|
||||
const apiKey = process.env.SIM_API_KEY;
|
||||
if (!apiKey) {
|
||||
throw new Error('SIMSTUDIO_API_KEY environment variable is required');
|
||||
throw new Error('SIM_API_KEY environment variable is required');
|
||||
}
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey,
|
||||
baseUrl: process.env.SIMSTUDIO_BASE_URL || 'https://sim.ai'
|
||||
baseUrl: process.env.SIM_BASE_URL || 'https://sim.ai'
|
||||
});
|
||||
```
|
||||
|
||||
</Tab>
|
||||
</Tabs>
|
||||
|
||||
### Node.js Expressとの統合
|
||||
### Node.js Express統合
|
||||
|
||||
Express.jsサーバーとの統合:
|
||||
|
||||
@@ -357,7 +511,7 @@ import { SimStudioClient } from 'simstudio-ts-sdk';
|
||||
|
||||
const app = express();
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.SIMSTUDIO_API_KEY!
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
app.use(express.json());
|
||||
@@ -399,7 +553,7 @@ import { NextApiRequest, NextApiResponse } from 'next';
|
||||
import { SimStudioClient } from 'simstudio-ts-sdk';
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.SIMSTUDIO_API_KEY!
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
export default async function handler(
|
||||
@@ -430,7 +584,7 @@ export default async function handler(
|
||||
|
||||
### ブラウザでの使用
|
||||
|
||||
ブラウザで使用する場合(適切なCORS設定が必要):
|
||||
ブラウザでの使用(適切なCORS設定が必要):
|
||||
|
||||
```typescript
|
||||
import { SimStudioClient } from 'simstudio-ts-sdk';
|
||||
@@ -464,7 +618,7 @@ document.getElementById('executeBtn')?.addEventListener('click', executeClientSi
|
||||
```
|
||||
|
||||
<Callout type="warning">
|
||||
ブラウザでSDKを使用する際は、機密性の高いAPIキーを公開しないよう注意してください。バックエンドプロキシや権限が制限された公開APIキーの使用を検討してください。
|
||||
ブラウザでSDKを使用する場合、機密性の高いAPIキーを公開しないよう注意してください。バックエンドプロキシや権限が制限された公開APIキーの使用を検討してください。
|
||||
</Callout>
|
||||
|
||||
### Reactフックの例
|
||||
@@ -476,7 +630,7 @@ import { useState, useCallback } from 'react';
|
||||
import { SimStudioClient, WorkflowExecutionResult } from 'simstudio-ts-sdk';
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.NEXT_PUBLIC_SIMSTUDIO_API_KEY!
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
interface UseWorkflowResult {
|
||||
@@ -532,7 +686,7 @@ function WorkflowComponent() {
|
||||
<button onClick={handleExecute} disabled={loading}>
|
||||
{loading ? 'Executing...' : 'Execute Workflow'}
|
||||
</button>
|
||||
|
||||
|
||||
{error && <div>Error: {error.message}</div>}
|
||||
{result && (
|
||||
<div>
|
||||
@@ -545,38 +699,267 @@ function WorkflowComponent() {
|
||||
}
|
||||
```
|
||||
|
||||
## APIキーの取得方法
|
||||
### 非同期ワークフロー実行
|
||||
|
||||
長時間実行タスク向けに非同期でワークフローを実行:
|
||||
|
||||
```typescript
|
||||
import { SimStudioClient, AsyncExecutionResult } from 'simstudio-ts-sdk';
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
async function executeAsync() {
|
||||
try {
|
||||
// Start async execution
|
||||
const result = await client.executeWorkflow('workflow-id', {
|
||||
input: { data: 'large dataset' },
|
||||
async: true // Execute asynchronously
|
||||
});
|
||||
|
||||
// Check if result is an async execution
|
||||
if ('taskId' in result) {
|
||||
console.log('Task ID:', result.taskId);
|
||||
console.log('Status endpoint:', result.links.status);
|
||||
|
||||
// Poll for completion
|
||||
let status = await client.getJobStatus(result.taskId);
|
||||
|
||||
while (status.status === 'queued' || status.status === 'processing') {
|
||||
console.log('Current status:', status.status);
|
||||
await new Promise(resolve => setTimeout(resolve, 2000)); // Wait 2 seconds
|
||||
status = await client.getJobStatus(result.taskId);
|
||||
}
|
||||
|
||||
if (status.status === 'completed') {
|
||||
console.log('Workflow completed!');
|
||||
console.log('Output:', status.output);
|
||||
console.log('Duration:', status.metadata.duration);
|
||||
} else {
|
||||
console.error('Workflow failed:', status.error);
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
console.error('Error:', error);
|
||||
}
|
||||
}
|
||||
|
||||
executeAsync();
|
||||
```
|
||||
|
||||
### レート制限とリトライ
|
||||
|
||||
指数バックオフによるレート制限の自動処理:
|
||||
|
||||
```typescript
|
||||
import { SimStudioClient, SimStudioError } from 'simstudio-ts-sdk';
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
async function executeWithRetryHandling() {
|
||||
try {
|
||||
// Automatically retries on rate limit
|
||||
const result = await client.executeWithRetry('workflow-id', {
|
||||
input: { message: 'Process this' }
|
||||
}, {
|
||||
maxRetries: 5,
|
||||
initialDelay: 1000,
|
||||
maxDelay: 60000,
|
||||
backoffMultiplier: 2
|
||||
});
|
||||
|
||||
console.log('Success:', result);
|
||||
} catch (error) {
|
||||
if (error instanceof SimStudioError && error.code === 'RATE_LIMIT_EXCEEDED') {
|
||||
console.error('Rate limit exceeded after all retries');
|
||||
|
||||
// Check rate limit info
|
||||
const rateLimitInfo = client.getRateLimitInfo();
|
||||
if (rateLimitInfo) {
|
||||
console.log('Rate limit resets at:', new Date(rateLimitInfo.reset * 1000));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 使用状況モニタリング
|
||||
|
||||
アカウントの使用状況と制限のモニタリング:
|
||||
|
||||
```typescript
|
||||
import { SimStudioClient } from 'simstudio-ts-sdk';
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
async function checkUsage() {
|
||||
try {
|
||||
const limits = await client.getUsageLimits();
|
||||
|
||||
console.log('=== Rate Limits ===');
|
||||
console.log('Sync requests:');
|
||||
console.log(' Limit:', limits.rateLimit.sync.limit);
|
||||
console.log(' Remaining:', limits.rateLimit.sync.remaining);
|
||||
console.log(' Resets at:', limits.rateLimit.sync.resetAt);
|
||||
console.log(' Is limited:', limits.rateLimit.sync.isLimited);
|
||||
|
||||
console.log('\nAsync requests:');
|
||||
console.log(' Limit:', limits.rateLimit.async.limit);
|
||||
console.log(' Remaining:', limits.rateLimit.async.remaining);
|
||||
console.log(' Resets at:', limits.rateLimit.async.resetAt);
|
||||
console.log(' Is limited:', limits.rateLimit.async.isLimited);
|
||||
|
||||
console.log('\n=== Usage ===');
|
||||
    console.log('Current period cost:', `$${limits.usage.currentPeriodCost.toFixed(2)}`);
    console.log('Limit:', `$${limits.usage.limit.toFixed(2)}`);
    console.log('Plan:', limits.usage.plan);

    const percentUsed = (limits.usage.currentPeriodCost / limits.usage.limit) * 100;
    console.log(`Usage: ${percentUsed.toFixed(1)}%`);

    if (percentUsed > 80) {
      console.log('⚠️ Warning: You are approaching your usage limit!');
    }
  } catch (error) {
    console.error('Error checking usage:', error);
  }
}

checkUsage();
```
### Streaming Workflow Execution
|
||||
|
||||
Execute workflows with real-time streaming responses:
|
||||
|
||||
```typescript
|
||||
import { SimStudioClient } from 'simstudio-ts-sdk';
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
async function executeWithStreaming() {
|
||||
try {
|
||||
// 特定のブロック出力のストリーミングを有効化
|
||||
const result = await client.executeWorkflow('workflow-id', {
|
||||
input: { message: 'Count to five' },
|
||||
stream: true,
|
||||
selectedOutputs: ['agent1.content'] // blockName.attribute形式を使用
|
||||
});
|
||||
|
||||
console.log('ワークフロー結果:', result);
|
||||
} catch (error) {
|
||||
console.error('エラー:', error);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
The streaming response follows the Server-Sent Events (SSE) format:
|
||||
|
||||
```
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":"One"}
|
||||
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", two"}
|
||||
|
||||
data: {"event":"done","success":true,"output":{},"metadata":{"duration":610}}
|
||||
|
||||
data: [DONE]
|
||||
```
|
||||
|
||||
**React Streaming Example:**
|
||||
|
||||
```typescript
|
||||
import { useState, useEffect } from 'react';
|
||||
|
||||
function StreamingWorkflow() {
|
||||
const [output, setOutput] = useState('');
|
||||
const [loading, setLoading] = useState(false);
|
||||
|
||||
const executeStreaming = async () => {
|
||||
setLoading(true);
|
||||
setOutput('');
|
||||
|
||||
// IMPORTANT: Make this API call from your backend server, not the browser
|
||||
// Never expose your API key in client-side code
|
||||
const response = await fetch('https://sim.ai/api/workflows/WORKFLOW_ID/execute', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'X-API-Key': process.env.SIM_API_KEY! // Server-side environment variable only
|
||||
},
|
||||
body: JSON.stringify({
|
||||
message: 'Generate a story',
|
||||
stream: true,
|
||||
selectedOutputs: ['agent1.content']
|
||||
})
|
||||
});
|
||||
|
||||
const reader = response.body?.getReader();
|
||||
const decoder = new TextDecoder();
|
||||
|
||||
while (reader) {
|
||||
const { done, value } = await reader.read();
|
||||
if (done) break;
|
||||
|
||||
const chunk = decoder.decode(value);
|
||||
const lines = chunk.split('\n\n');
|
||||
|
||||
for (const line of lines) {
|
||||
if (line.startsWith('data: ')) {
|
||||
const data = line.slice(6);
|
||||
if (data === '[DONE]') {
|
||||
setLoading(false);
|
||||
break;
|
||||
}
|
||||
|
||||
try {
|
||||
const parsed = JSON.parse(data);
|
||||
if (parsed.chunk) {
|
||||
setOutput(prev => prev + parsed.chunk);
|
||||
} else if (parsed.event === 'done') {
|
||||
console.log('Execution complete:', parsed.metadata);
|
||||
}
|
||||
} catch (e) {
|
||||
// Skip invalid JSON
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
return (
|
||||
<div>
|
||||
<button onClick={executeStreaming} disabled={loading}>
|
||||
{loading ? 'Generating...' : 'Start Streaming'}
|
||||
</button>
|
||||
<div style={{ whiteSpace: 'pre-wrap' }}>{output}</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
```
|
||||
|
||||
## Getting Your API Key
|
||||
|
||||
<Steps>
|
||||
<Step title="Simにログイン">
|
||||
[Sim](https://sim.ai)に移動してアカウントにログインします。
|
||||
<Step title="Log in to Sim">
|
||||
Navigate to [Sim](https://sim.ai) and log in to your account.
|
||||
</Step>
|
||||
<Step title="ワークフローを開く">
|
||||
プログラムで実行したいワークフローに移動します。
|
||||
<Step title="Open your workflow">
|
||||
Navigate to the workflow you want to execute programmatically.
|
||||
</Step>
|
||||
<Step title="ワークフローをデプロイ">
|
||||
まだデプロイされていない場合は、「デプロイ」をクリックしてワークフローをデプロイします。
|
||||
<Step title="Deploy your workflow">
|
||||
Click on "Deploy" to deploy your workflow if it hasn't been deployed yet.
|
||||
</Step>
|
||||
<Step title="APIキーを作成または選択">
|
||||
デプロイ処理中に、APIキーを選択または作成します。
|
||||
<Step title="Create or select an API key">
|
||||
During the deployment process, select or create an API key.
|
||||
</Step>
|
||||
<Step title="APIキーをコピー">
|
||||
TypeScript/JavaScriptアプリケーションで使用するAPIキーをコピーします。
|
||||
<Step title="Copy the API key">
|
||||
Copy the API key to use in your TypeScript/JavaScript application.
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
<Callout type="warning">
|
||||
APIキーは安全に保管し、バージョン管理システムにコミットしないでください。環境変数や安全な設定管理を使用してください。
|
||||
Keep your API key secure and never commit it to version control. Use environment variables or secure configuration management.
|
||||
</Callout>
|
||||
|
||||
## 要件
|
||||
## Requirements
|
||||
|
||||
- Node.js 16以上
|
||||
- TypeScript 5.0以上(TypeScriptプロジェクトの場合)
|
||||
- Node.js 16+
|
||||
- TypeScript 5.0+ (for TypeScript projects)
|
||||
|
||||
## TypeScriptサポート
|
||||
## TypeScript Support
|
||||
|
||||
このSDKはTypeScriptで書かれており、完全な型安全性を提供します:
|
||||
The SDK is written in TypeScript and provides full type safety:
|
||||
|
||||
```typescript
|
||||
import {
|
||||
@@ -588,7 +971,7 @@ import {
|
||||
|
||||
// Type-safe client initialization
|
||||
const client: SimStudioClient = new SimStudioClient({
|
||||
apiKey: process.env.SIMSTUDIO_API_KEY!
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
// Type-safe workflow execution
|
||||
@@ -602,6 +985,7 @@ const result: WorkflowExecutionResult = await client.executeWorkflow('workflow-i
|
||||
const status: WorkflowStatus = await client.getWorkflowStatus('workflow-id');
|
||||
```
|
||||
|
||||
## ライセンス
|
||||
## License
|
||||
|
||||
Apache-2.0
|
||||
|
||||
Apache-2.0
|
||||
|
||||
@@ -38,6 +38,84 @@ curl -X POST \
|
||||
|
||||
成功したレスポンスはエグゼキュータからシリアル化された実行結果を返します。エラーは検証、認証、またはワークフローの失敗を表示します。
|
||||
|
||||
## ストリーミングレスポンス
|
||||
|
||||
リアルタイムストリーミングを有効にすると、ワークフローの出力が生成されるたびに文字単位で受信できます。これはAIの応答をユーザーに段階的に表示するのに役立ちます。
|
||||
|
||||
### リクエストパラメータ
|
||||
|
||||
ストリーミングを有効にするには、これらのパラメータを追加してください:
|
||||
|
||||
- `stream` - Server-Sent Events (SSE)ストリーミングを有効にするには `true` に設定します
|
||||
- `selectedOutputs` - ストリーミングするブロック出力の配列(例:`["agent1.content"]`)
|
||||
|
||||
### ブロック出力フォーマット
|
||||
|
||||
`blockName.attribute` フォーマットを使用して、ストリーミングするブロック出力を指定します:
|
||||
- フォーマット:`"blockName.attribute"`(例:Agent 1ブロックの内容をストリーミングしたい場合は、`"agent1.content"` を使用します)
|
||||
- ブロック名は大文字小文字を区別せず、スペースは無視されます
|
||||
|
||||
### リクエスト例
|
||||
|
||||
```bash
|
||||
curl -X POST \
|
||||
https://sim.ai/api/workflows/WORKFLOW_ID/execute \
|
||||
-H 'Content-Type: application/json' \
|
||||
-H 'X-API-Key: YOUR_KEY' \
|
||||
-d '{
|
||||
"message": "Count to five",
|
||||
"stream": true,
|
||||
"selectedOutputs": ["agent1.content"]
|
||||
}'
|
||||
```
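
For reference, the same streaming request can be issued from Node.js 18+ (which ships a global `fetch`) in an ES module; this is a sketch, and `WORKFLOW_ID` is a placeholder:

```typescript
// Sketch: the curl request above, issued from Node.js 18+ (global fetch) in an ES module.
// WORKFLOW_ID is a placeholder; the API key is read from the environment.
const response = await fetch('https://sim.ai/api/workflows/WORKFLOW_ID/execute', {
  method: 'POST',
  headers: {
    'Content-Type': 'application/json',
    'X-API-Key': process.env.SIM_API_KEY!,
  },
  body: JSON.stringify({
    message: 'Count to five',
    stream: true,
    selectedOutputs: ['agent1.content'],
  }),
});
console.log('Status:', response.status);
```
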
### レスポンスフォーマット
|
||||
|
||||
ストリーミングレスポンスはServer-Sent Events (SSE)フォーマットを使用します:
|
||||
|
||||
```
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":"One"}
|
||||
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", two"}
|
||||
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", three"}
|
||||
|
||||
data: {"event":"done","success":true,"output":{},"metadata":{"duration":610}}
|
||||
|
||||
data: [DONE]
|
||||
```
|
||||
|
||||
各イベントには以下が含まれます:
|
||||
- **ストリーミングチャンク**:`{"blockId": "...", "chunk": "text"}` - 生成されるリアルタイムテキスト
|
||||
- **最終イベント**:`{"event": "done", ...}` - 実行メタデータと完全な結果
|
||||
- **ターミネーター**:`[DONE]` - ストリーム終了を示す信号
|
||||
|
||||
### 複数ブロックのストリーミング
|
||||
|
||||
`selectedOutputs` に複数のブロックが含まれる場合、各チャンクはどのブロックから生成されたかを示します:
|
||||
|
||||
```bash
|
||||
curl -X POST \
|
||||
https://sim.ai/api/workflows/WORKFLOW_ID/execute \
|
||||
-H 'Content-Type: application/json' \
|
||||
-H 'X-API-Key: YOUR_KEY' \
|
||||
-d '{
|
||||
"message": "Process this request",
|
||||
"stream": true,
|
||||
"selectedOutputs": ["agent1.content", "agent2.content"]
|
||||
}'
|
||||
```
|
||||
|
||||
各チャンクの `blockId` フィールドを使用して、出力を正しいUI要素にルーティングできます:
|
||||
|
||||
```
|
||||
data: {"blockId":"agent1-uuid","chunk":"Processing..."}
|
||||
|
||||
data: {"blockId":"agent2-uuid","chunk":"Analyzing..."}
|
||||
|
||||
data: {"blockId":"agent1-uuid","chunk":" complete"}
|
||||
```
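
A small sketch of the routing described above, dispatching parsed chunks by `blockId`; the handler map, UUIDs, and element IDs below are hypothetical:

```typescript
// Hypothetical sketch: dispatch streamed chunks to per-block handlers.
// The UUIDs and element IDs stand in for the blockIds seen in your own stream.
const handlers: Record<string, (chunk: string) => void> = {
  'agent1-uuid': (chunk) => appendTo('agent1-panel', chunk),
  'agent2-uuid': (chunk) => appendTo('agent2-panel', chunk),
};

function handleSseData(data: string) {
  if (data === '[DONE]') return; // end of stream
  const event = JSON.parse(data);
  if (event.chunk && handlers[event.blockId]) {
    handlers[event.blockId](event.chunk); // route text to the right UI element
  } else if (event.event === 'done') {
    console.log('Execution finished:', event.metadata);
  }
}

function appendTo(elementId: string, text: string) {
  const el = document.getElementById(elementId);
  if (el) el.textContent += text;
}
```
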
## 出力リファレンス
|
||||
|
||||
| リファレンス | 説明 |
|
||||
@@ -45,7 +123,7 @@ curl -X POST \
|
||||
| `<api.field>` | 入力フォーマットで定義されたフィールド |
|
||||
| `<api.input>` | 構造化されたリクエスト本文全体 |
|
||||
|
||||
入力フォーマットが定義されていない場合、エグゼキュータは生のJSONを `<api.input>` のみで公開します。
|
||||
入力フォーマットが定義されていない場合、エグゼキューターは `<api.input>` でのみ生のJSONを公開します。
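
As a concrete illustration (the field names are hypothetical): if the input format defines an `email` field and the request body below is posted to the endpoint, the references resolve as noted in the comments:

```typescript
// Hypothetical request body posted to the API trigger endpoint.
const requestBody = {
  email: 'ada@example.com',
  note: 'VIP customer',
};

// Inside the workflow, downstream blocks could then reference:
//   <api.email> -> 'ada@example.com'              (a field defined in the input format)
//   <api.input> -> { email: '...', note: '...' }  (the entire structured body)
// With no input format defined, only <api.input> exposes the raw JSON.
```
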
<Callout type="warning">
|
||||
ワークフローには1つのAPIトリガーのみ含めることができます。変更後は新しいデプロイメントを公開して、エンドポイントを最新の状態に保ってください。
|
||||
|
||||
@@ -10,7 +10,7 @@ import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
|
||||
官方的 Python SDK 允许您通过 Python 应用程序以编程方式执行工作流。
|
||||
|
||||
<Callout type="info">
|
||||
Python SDK 支持 Python 3.8+,并提供同步工作流执行。目前所有工作流执行均为同步模式。
|
||||
Python SDK 支持 Python 3.8+,具备异步执行支持、自动速率限制(带指数退避)以及使用情况跟踪功能。
|
||||
</Callout>
|
||||
|
||||
## 安装
|
||||
@@ -72,10 +72,15 @@ result = client.execute_workflow(
|
||||
|
||||
**参数:**
|
||||
- `workflow_id` (str): 要执行的工作流 ID
|
||||
- `input_data` (dict, 可选): 传递给工作流的输入数据
|
||||
- `timeout` (float, 可选): 超时时间(以秒为单位,默认值:30.0)
|
||||
- `input_data` (dict, optional): 传递给工作流的输入数据
|
||||
- `timeout` (float, optional): 超时时间(以秒为单位,默认值:30.0)
|
||||
- `stream` (bool, optional): 启用流式响应(默认值:False)
|
||||
- `selected_outputs` (list[str], optional): 以 `blockName.attribute` 格式指定要流式传输的块输出(例如,`["agent1.content"]`)
|
||||
- `async_execution` (bool, optional): 异步执行(默认值:False)
|
||||
|
||||
**返回值:** `WorkflowExecutionResult`
|
||||
**返回值:** `WorkflowExecutionResult | AsyncExecutionResult`
|
||||
|
||||
当 `async_execution=True` 时,立即返回任务 ID 以供轮询。否则,等待完成。
|
||||
|
||||
##### get_workflow_status()
|
||||
|
||||
@@ -103,32 +108,122 @@ if is_ready:
|
||||
```
|
||||
|
||||
**参数:**
|
||||
- `workflow_id` (str):工作流的 ID
|
||||
- `workflow_id` (str): 工作流的 ID
|
||||
|
||||
**返回值:** `bool`
|
||||
|
||||
##### execute_workflow_sync()
|
||||
##### get_job_status()
|
||||
|
||||
<Callout type="info">
|
||||
当前,此方法与 `execute_workflow()` 相同,因为所有执行都是同步的。提供此方法是为了在将来添加异步执行时保持兼容性。
|
||||
</Callout>
|
||||
|
||||
执行工作流(当前为同步,与 `execute_workflow()` 相同)。
|
||||
获取异步任务执行的状态。
|
||||
|
||||
```python
|
||||
result = client.execute_workflow_sync(
|
||||
status = client.get_job_status("task-id-from-async-execution")
|
||||
print("Status:", status["status"]) # 'queued', 'processing', 'completed', 'failed'
|
||||
if status["status"] == "completed":
|
||||
print("Output:", status["output"])
|
||||
```
|
||||
|
||||
**参数:**
|
||||
- `task_id` (str): 异步执行返回的任务 ID
|
||||
|
||||
**返回值:** `Dict[str, Any]`
|
||||
|
||||
**响应字段:**
|
||||
- `success` (bool): 请求是否成功
|
||||
- `taskId` (str): 任务 ID
|
||||
- `status` (str): 可能的值包括 `'queued'`, `'processing'`, `'completed'`, `'failed'`, `'cancelled'`
|
||||
- `metadata` (dict): 包含 `startedAt`, `completedAt` 和 `duration`
|
||||
- `output` (any, optional): 工作流输出(完成时)
|
||||
- `error` (any, optional): 错误详情(失败时)
|
||||
- `estimatedDuration` (int, optional): 估计持续时间(以毫秒为单位,处理中/排队时)
|
||||
|
||||
##### execute_with_retry()
|
||||
|
||||
使用指数退避在速率限制错误上自动重试执行工作流。
|
||||
|
||||
```python
|
||||
result = client.execute_with_retry(
|
||||
"workflow-id",
|
||||
input_data={"data": "some input"},
|
||||
timeout=60.0
|
||||
input_data={"message": "Hello"},
|
||||
timeout=30.0,
|
||||
max_retries=3, # Maximum number of retries
|
||||
initial_delay=1.0, # Initial delay in seconds
|
||||
max_delay=30.0, # Maximum delay in seconds
|
||||
backoff_multiplier=2.0 # Exponential backoff multiplier
|
||||
)
|
||||
```
|
||||
|
||||
**参数:**
|
||||
- `workflow_id` (str):要执行的工作流 ID
|
||||
- `input_data` (dict, optional):传递给工作流的输入数据
|
||||
- `timeout` (float):初始请求的超时时间(以秒为单位)
|
||||
- `workflow_id` (str): 要执行的工作流 ID
|
||||
- `input_data` (dict, optional): 传递给工作流的输入数据
|
||||
- `timeout` (float, optional): 超时时间(以秒为单位)
|
||||
- `stream` (bool, optional): 启用流式响应
|
||||
- `selected_outputs` (list, optional): 要流式传输的块输出
|
||||
- `async_execution` (bool, optional): 异步执行
|
||||
- `max_retries` (int, optional): 最大重试次数(默认值:3)
|
||||
- `initial_delay` (float, optional): 初始延迟时间(以秒为单位,默认值:1.0)
|
||||
- `max_delay` (float, optional): 最大延迟时间(以秒为单位,默认值:30.0)
|
||||
- `backoff_multiplier` (float, optional): 退避倍数(默认值:2.0)
|
||||
|
||||
**返回值:** `WorkflowExecutionResult`
|
||||
**返回值:** `WorkflowExecutionResult | AsyncExecutionResult`
|
||||
|
||||
重试逻辑使用指数退避(1 秒 → 2 秒 → 4 秒 → 8 秒...),并带有 ±25% 的抖动以防止惊群效应。如果 API 提供了 `retry-after` 标头,则会使用该标头。
|
||||
|
||||
##### get_rate_limit_info()
|
||||
|
||||
从上一次 API 响应中获取当前的速率限制信息。
|
||||
|
||||
```python
|
||||
rate_limit_info = client.get_rate_limit_info()
|
||||
if rate_limit_info:
|
||||
print("Limit:", rate_limit_info.limit)
|
||||
print("Remaining:", rate_limit_info.remaining)
|
||||
print("Reset:", datetime.fromtimestamp(rate_limit_info.reset))
|
||||
```
|
||||
|
||||
**返回值:** `RateLimitInfo | None`
|
||||
|
||||
##### get_usage_limits()
|
||||
|
||||
获取您的账户当前的使用限制和配额信息。
|
||||
|
||||
```python
|
||||
limits = client.get_usage_limits()
|
||||
print("Sync requests remaining:", limits.rate_limit["sync"]["remaining"])
|
||||
print("Async requests remaining:", limits.rate_limit["async"]["remaining"])
|
||||
print("Current period cost:", limits.usage["currentPeriodCost"])
|
||||
print("Plan:", limits.usage["plan"])
|
||||
```
|
||||
|
||||
**返回值:** `UsageLimits`
|
||||
|
||||
**响应结构:**
|
||||
|
||||
```python
|
||||
{
|
||||
"success": bool,
|
||||
"rateLimit": {
|
||||
"sync": {
|
||||
"isLimited": bool,
|
||||
"limit": int,
|
||||
"remaining": int,
|
||||
"resetAt": str
|
||||
},
|
||||
"async": {
|
||||
"isLimited": bool,
|
||||
"limit": int,
|
||||
"remaining": int,
|
||||
"resetAt": str
|
||||
},
|
||||
"authType": str # 'api' or 'manual'
|
||||
},
|
||||
"usage": {
|
||||
"currentPeriodCost": float,
|
||||
"limit": float,
|
||||
"plan": str # e.g., 'free', 'pro'
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
##### set_api_key()
|
||||
|
||||
@@ -170,6 +265,18 @@ class WorkflowExecutionResult:
|
||||
total_duration: Optional[float] = None
|
||||
```
|
||||
|
||||
### AsyncExecutionResult
|
||||
|
||||
```python
|
||||
@dataclass
|
||||
class AsyncExecutionResult:
|
||||
success: bool
|
||||
task_id: str
|
||||
status: str # 'queued'
|
||||
created_at: str
|
||||
links: Dict[str, str] # e.g., {"status": "/api/jobs/{taskId}"}
|
||||
```
|
||||
|
||||
### WorkflowStatus
|
||||
|
||||
```python
|
||||
@@ -181,6 +288,27 @@ class WorkflowStatus:
|
||||
needs_redeployment: bool = False
|
||||
```
|
||||
|
||||
### RateLimitInfo
|
||||
|
||||
```python
|
||||
@dataclass
|
||||
class RateLimitInfo:
|
||||
limit: int
|
||||
remaining: int
|
||||
reset: int
|
||||
retry_after: Optional[int] = None
|
||||
```
|
||||
|
||||
### UsageLimits
|
||||
|
||||
```python
|
||||
@dataclass
|
||||
class UsageLimits:
|
||||
success: bool
|
||||
rate_limit: Dict[str, Any]
|
||||
usage: Dict[str, Any]
|
||||
```
|
||||
|
||||
### SimStudioError
|
||||
|
||||
```python
|
||||
@@ -191,6 +319,13 @@ class SimStudioError(Exception):
|
||||
self.status = status
|
||||
```
|
||||
|
||||
**常见错误代码:**
|
||||
- `UNAUTHORIZED`: 无效的 API 密钥
|
||||
- `TIMEOUT`: 请求超时
|
||||
- `RATE_LIMIT_EXCEEDED`: 超出速率限制
|
||||
- `USAGE_LIMIT_EXCEEDED`: 超出使用限制
|
||||
- `EXECUTION_ERROR`: 工作流执行失败
|
||||
|
||||
## 示例
|
||||
|
||||
### 基本工作流执行
|
||||
@@ -214,7 +349,7 @@ class SimStudioError(Exception):
|
||||
import os
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def run_workflow():
|
||||
try:
|
||||
@@ -252,7 +387,7 @@ run_workflow()
|
||||
from simstudio import SimStudioClient, SimStudioError
|
||||
import os
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_with_error_handling():
|
||||
try:
|
||||
@@ -284,7 +419,7 @@ from simstudio import SimStudioClient
|
||||
import os
|
||||
|
||||
# Using context manager to automatically close the session
|
||||
with SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY")) as client:
|
||||
with SimStudioClient(api_key=os.getenv("SIM_API_KEY")) as client:
|
||||
result = client.execute_workflow("workflow-id")
|
||||
print("Result:", result)
|
||||
# Session is automatically closed here
|
||||
@@ -298,7 +433,7 @@ with SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY")) as client:
|
||||
from simstudio import SimStudioClient
|
||||
import os
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_workflows_batch(workflow_data_pairs):
|
||||
"""Execute multiple workflows with different input data."""
|
||||
@@ -339,6 +474,230 @@ for result in results:
|
||||
print(f"Workflow {result['workflow_id']}: {'Success' if result['success'] else 'Failed'}")
|
||||
```
|
||||
|
||||
### 异步工作流执行
|
||||
|
||||
为长时间运行的任务异步执行工作流:
|
||||
|
||||
```python
|
||||
import os
|
||||
import time
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_async():
|
||||
try:
|
||||
# Start async execution
|
||||
result = client.execute_workflow(
|
||||
"workflow-id",
|
||||
input_data={"data": "large dataset"},
|
||||
async_execution=True # Execute asynchronously
|
||||
)
|
||||
|
||||
# Check if result is an async execution
|
||||
if hasattr(result, 'task_id'):
|
||||
print(f"Task ID: {result.task_id}")
|
||||
print(f"Status endpoint: {result.links['status']}")
|
||||
|
||||
# Poll for completion
|
||||
status = client.get_job_status(result.task_id)
|
||||
|
||||
while status["status"] in ["queued", "processing"]:
|
||||
print(f"Current status: {status['status']}")
|
||||
time.sleep(2) # Wait 2 seconds
|
||||
status = client.get_job_status(result.task_id)
|
||||
|
||||
if status["status"] == "completed":
|
||||
print("Workflow completed!")
|
||||
print(f"Output: {status['output']}")
|
||||
print(f"Duration: {status['metadata']['duration']}")
|
||||
else:
|
||||
print(f"Workflow failed: {status['error']}")
|
||||
|
||||
except Exception as error:
|
||||
print(f"Error: {error}")
|
||||
|
||||
execute_async()
|
||||
```
|
||||
|
||||
### 速率限制与重试
|
||||
|
||||
通过指数退避自动处理速率限制:
|
||||
|
||||
```python
|
||||
import os
|
||||
from simstudio import SimStudioClient, SimStudioError
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_with_retry_handling():
|
||||
try:
|
||||
# Automatically retries on rate limit
|
||||
result = client.execute_with_retry(
|
||||
"workflow-id",
|
||||
input_data={"message": "Process this"},
|
||||
max_retries=5,
|
||||
initial_delay=1.0,
|
||||
max_delay=60.0,
|
||||
backoff_multiplier=2.0
|
||||
)
|
||||
|
||||
print(f"Success: {result}")
|
||||
except SimStudioError as error:
|
||||
if error.code == "RATE_LIMIT_EXCEEDED":
|
||||
print("Rate limit exceeded after all retries")
|
||||
|
||||
# Check rate limit info
|
||||
rate_limit_info = client.get_rate_limit_info()
|
||||
if rate_limit_info:
|
||||
from datetime import datetime
|
||||
reset_time = datetime.fromtimestamp(rate_limit_info.reset)
|
||||
print(f"Rate limit resets at: {reset_time}")
|
||||
|
||||
execute_with_retry_handling()
|
||||
```
|
||||
|
||||
### 使用监控
|
||||
|
||||
监控您的账户使用情况和限制:
|
||||
|
||||
```python
|
||||
import os
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def check_usage():
|
||||
try:
|
||||
limits = client.get_usage_limits()
|
||||
|
||||
print("=== Rate Limits ===")
|
||||
print("Sync requests:")
|
||||
print(f" Limit: {limits.rate_limit['sync']['limit']}")
|
||||
print(f" Remaining: {limits.rate_limit['sync']['remaining']}")
|
||||
print(f" Resets at: {limits.rate_limit['sync']['resetAt']}")
|
||||
print(f" Is limited: {limits.rate_limit['sync']['isLimited']}")
|
||||
|
||||
print("\nAsync requests:")
|
||||
print(f" Limit: {limits.rate_limit['async']['limit']}")
|
||||
print(f" Remaining: {limits.rate_limit['async']['remaining']}")
|
||||
print(f" Resets at: {limits.rate_limit['async']['resetAt']}")
|
||||
print(f" Is limited: {limits.rate_limit['async']['isLimited']}")
|
||||
|
||||
print("\n=== Usage ===")
|
||||
print(f"Current period cost: ${limits.usage['currentPeriodCost']:.2f}")
|
||||
print(f"Limit: ${limits.usage['limit']:.2f}")
|
||||
print(f"Plan: {limits.usage['plan']}")
|
||||
|
||||
percent_used = (limits.usage['currentPeriodCost'] / limits.usage['limit']) * 100
|
||||
print(f"Usage: {percent_used:.1f}%")
|
||||
|
||||
if percent_used > 80:
|
||||
print("⚠️ Warning: You are approaching your usage limit!")
|
||||
|
||||
except Exception as error:
|
||||
print(f"Error checking usage: {error}")
|
||||
|
||||
check_usage()
|
||||
```
|
||||
|
||||
### 流式工作流执行
|
||||
|
||||
通过实时流式响应执行工作流:
|
||||
|
||||
```python
|
||||
from simstudio import SimStudioClient
|
||||
import os
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_with_streaming():
|
||||
"""Execute workflow with streaming enabled."""
|
||||
try:
|
||||
# Enable streaming for specific block outputs
|
||||
result = client.execute_workflow(
|
||||
"workflow-id",
|
||||
input_data={"message": "Count to five"},
|
||||
stream=True,
|
||||
selected_outputs=["agent1.content"] # Use blockName.attribute format
|
||||
)
|
||||
|
||||
print("Workflow result:", result)
|
||||
except Exception as error:
|
||||
print("Error:", error)
|
||||
|
||||
execute_with_streaming()
|
||||
```
|
||||
|
||||
流式响应遵循服务器发送事件 (SSE) 格式:
|
||||
|
||||
```
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":"One"}
|
||||
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", two"}
|
||||
|
||||
data: {"event":"done","success":true,"output":{},"metadata":{"duration":610}}
|
||||
|
||||
data: [DONE]
|
||||
```
|
||||
|
||||
**Flask 流式示例:**
|
||||
|
||||
```python
|
||||
from flask import Flask, Response, stream_with_context
|
||||
import requests
|
||||
import json
|
||||
import os
|
||||
|
||||
app = Flask(__name__)
|
||||
|
||||
@app.route('/stream-workflow')
|
||||
def stream_workflow():
|
||||
"""Stream workflow execution to the client."""
|
||||
|
||||
def generate():
|
||||
response = requests.post(
|
||||
'https://sim.ai/api/workflows/WORKFLOW_ID/execute',
|
||||
headers={
|
||||
'Content-Type': 'application/json',
|
||||
'X-API-Key': os.getenv('SIM_API_KEY')
|
||||
},
|
||||
json={
|
||||
'message': 'Generate a story',
|
||||
'stream': True,
|
||||
'selectedOutputs': ['agent1.content']
|
||||
},
|
||||
stream=True
|
||||
)
|
||||
|
||||
for line in response.iter_lines():
|
||||
if line:
|
||||
decoded_line = line.decode('utf-8')
|
||||
if decoded_line.startswith('data: '):
|
||||
data = decoded_line[6:] # Remove 'data: ' prefix
|
||||
|
||||
if data == '[DONE]':
|
||||
break
|
||||
|
||||
try:
|
||||
parsed = json.loads(data)
|
||||
if 'chunk' in parsed:
|
||||
yield f"data: {json.dumps(parsed)}\n\n"
|
||||
elif parsed.get('event') == 'done':
|
||||
yield f"data: {json.dumps(parsed)}\n\n"
|
||||
print("Execution complete:", parsed.get('metadata'))
|
||||
except json.JSONDecodeError:
|
||||
pass
|
||||
|
||||
return Response(
|
||||
stream_with_context(generate()),
|
||||
mimetype='text/event-stream'
|
||||
)
|
||||
|
||||
if __name__ == '__main__':
|
||||
app.run(debug=True)
|
||||
```
|
||||
|
||||
### 环境配置
|
||||
|
||||
使用环境变量配置客户端:
|
||||
@@ -352,8 +711,8 @@ for result in results:
|
||||
|
||||
# Development configuration
|
||||
client = SimStudioClient(
|
||||
api_key=os.getenv("SIMSTUDIO_API_KEY"),
|
||||
base_url=os.getenv("SIMSTUDIO_BASE_URL", "https://sim.ai")
|
||||
api_key=os.getenv("SIM_API_KEY")
|
||||
base_url=os.getenv("SIM_BASE_URL", "https://sim.ai")
|
||||
)
|
||||
```
|
||||
|
||||
@@ -365,13 +724,13 @@ for result in results:
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
# Production configuration with error handling
|
||||
api_key = os.getenv("SIMSTUDIO_API_KEY")
|
||||
api_key = os.getenv("SIM_API_KEY")
|
||||
if not api_key:
|
||||
raise ValueError("SIMSTUDIO_API_KEY environment variable is required")
|
||||
raise ValueError("SIM_API_KEY environment variable is required")
|
||||
|
||||
client = SimStudioClient(
|
||||
api_key=api_key,
|
||||
base_url=os.getenv("SIMSTUDIO_BASE_URL", "https://sim.ai")
|
||||
base_url=os.getenv("SIM_BASE_URL", "https://sim.ai")
|
||||
)
|
||||
```
|
||||
|
||||
@@ -382,15 +741,15 @@ for result in results:
|
||||
|
||||
<Steps>
|
||||
<Step title="登录 Sim">
|
||||
访问 [Sim](https://sim.ai) 并登录您的账户。
|
||||
前往 [Sim](https://sim.ai) 并登录您的账户。
|
||||
</Step>
|
||||
<Step title="打开您的工作流">
|
||||
导航到您想要以编程方式执行的工作流。
|
||||
前往您想要以编程方式执行的工作流。
|
||||
</Step>
|
||||
<Step title="部署您的工作流">
|
||||
点击“部署”以部署您的工作流(如果尚未部署)。
|
||||
如果尚未部署,请点击“部署”以部署您的工作流。
|
||||
</Step>
|
||||
<Step title="创建或选择 API 密钥">
|
||||
<Step title="创建或选择一个 API 密钥">
|
||||
在部署过程中,选择或创建一个 API 密钥。
|
||||
</Step>
|
||||
<Step title="复制 API 密钥">
|
||||
|
||||
@@ -7,10 +7,10 @@ import { Card, Cards } from 'fumadocs-ui/components/card'
|
||||
import { Step, Steps } from 'fumadocs-ui/components/steps'
|
||||
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
|
||||
|
||||
Sim 的官方 TypeScript/JavaScript SDK 提供完整的类型安全,支持 Node.js 和浏览器环境,允许您从 Node.js 应用程序、Web 应用程序和其他 JavaScript 环境中以编程方式执行工作流。目前,所有工作流执行均为同步。
|
||||
Sim 的官方 TypeScript/JavaScript SDK 提供完整的类型安全,支持 Node.js 和浏览器环境,允许您从 Node.js 应用程序、Web 应用程序和其他 JavaScript 环境中以编程方式执行工作流。
|
||||
|
||||
<Callout type="info">
|
||||
TypeScript SDK 提供完整的类型安全,支持 Node.js 和浏览器环境。目前,所有工作流执行均为同步。
|
||||
TypeScript SDK 提供完整的类型安全、异步执行支持、带有指数回退的自动速率限制以及使用跟踪。
|
||||
</Callout>
|
||||
|
||||
## 安装
|
||||
@@ -91,12 +91,17 @@ const result = await client.executeWorkflow('workflow-id', {
|
||||
```
|
||||
|
||||
**参数:**
|
||||
- `workflowId`(字符串):要执行的工作流的 ID
|
||||
- `options`(ExecutionOptions,可选):
|
||||
- `input`(任意类型):传递给工作流的输入数据
|
||||
- `timeout`(数字):超时时间(以毫秒为单位,默认值:30000)
|
||||
- `workflowId` (字符串): 要执行的工作流的 ID
|
||||
- `options` (ExecutionOptions,可选):
|
||||
- `input` (任意类型): 传递给工作流的输入数据
|
||||
- `timeout` (数字): 超时时间(以毫秒为单位,默认值:30000)
|
||||
- `stream` (布尔值): 启用流式响应(默认值:false)
|
||||
- `selectedOutputs` (字符串数组): 以 `blockName.attribute` 格式指定要流式传输的块输出(例如,`["agent1.content"]`)
|
||||
- `async` (布尔值): 异步执行(默认值:false)
|
||||
|
||||
**返回值:** `Promise<WorkflowExecutionResult>`
|
||||
**返回值:** `Promise<WorkflowExecutionResult | AsyncExecutionResult>`
|
||||
|
||||
当 `async: true` 时,立即返回一个用于轮询的任务 ID。否则,等待完成。
|
||||
|
||||
##### getWorkflowStatus()
|
||||
|
||||
@@ -108,7 +113,7 @@ console.log('Is deployed:', status.isDeployed);
|
||||
```
|
||||
|
||||
**参数:**
|
||||
- `workflowId`(字符串):工作流的 ID
|
||||
- `workflowId` (字符串): 工作流的 ID
|
||||
|
||||
**返回值:** `Promise<WorkflowStatus>`
|
||||
|
||||
@@ -124,32 +129,121 @@ if (isReady) {
|
||||
```
|
||||
|
||||
**参数:**
|
||||
- `workflowId`(字符串):工作流的 ID
|
||||
- `workflowId` (字符串): 工作流的 ID
|
||||
|
||||
**返回值:** `Promise<boolean>`
|
||||
|
||||
##### executeWorkflowSync()
|
||||
##### getJobStatus()
|
||||
|
||||
<Callout type="info">
|
||||
当前,此方法与 `executeWorkflow()` 相同,因为所有执行都是同步的。提供此方法是为了在将来添加异步执行时保持兼容性。
|
||||
</Callout>
|
||||
|
||||
执行工作流(当前为同步,与 `executeWorkflow()` 相同)。
|
||||
获取异步任务执行的状态。
|
||||
|
||||
```typescript
|
||||
const result = await client.executeWorkflowSync('workflow-id', {
|
||||
input: { data: 'some input' },
|
||||
timeout: 60000
|
||||
const status = await client.getJobStatus('task-id-from-async-execution');
|
||||
console.log('Status:', status.status); // 'queued', 'processing', 'completed', 'failed'
|
||||
if (status.status === 'completed') {
|
||||
console.log('Output:', status.output);
|
||||
}
|
||||
```
|
||||
|
||||
**参数:**
|
||||
- `taskId` (字符串): 异步执行返回的任务 ID
|
||||
|
||||
**返回值:** `Promise<JobStatus>`
|
||||
|
||||
**响应字段:**
|
||||
- `success` (布尔值): 请求是否成功
|
||||
- `taskId` (字符串): 任务 ID
|
||||
- `status` (字符串): 可能的值包括 `'queued'`, `'processing'`, `'completed'`, `'failed'`, `'cancelled'`
|
||||
- `metadata` (对象): 包含 `startedAt`, `completedAt` 和 `duration`
|
||||
- `output` (任意类型,可选): 工作流输出(完成时)
|
||||
- `error` (任意类型,可选): 错误详情(失败时)
|
||||
- `estimatedDuration` (数字,可选): 估计持续时间(以毫秒为单位,处理中/排队时)
|
||||
|
||||
##### executeWithRetry()
|
||||
|
||||
使用指数退避机制,在遇到速率限制错误时自动重试执行工作流。
|
||||
|
||||
```typescript
|
||||
const result = await client.executeWithRetry('workflow-id', {
|
||||
input: { message: 'Hello' },
|
||||
timeout: 30000
|
||||
}, {
|
||||
maxRetries: 3, // Maximum number of retries
|
||||
initialDelay: 1000, // Initial delay in ms (1 second)
|
||||
maxDelay: 30000, // Maximum delay in ms (30 seconds)
|
||||
backoffMultiplier: 2 // Exponential backoff multiplier
|
||||
});
|
||||
```
|
||||
|
||||
**参数:**
|
||||
- `workflowId`(字符串):要执行的工作流的 ID
|
||||
- `options`(ExecutionOptions,可选):
|
||||
- `input`(任意类型):传递给工作流的输入数据
|
||||
- `timeout`(数字):初始请求的超时时间(以毫秒为单位)
|
||||
- `options`(ExecutionOptions,可选):与 `executeWorkflow()` 相同
|
||||
- `retryOptions`(RetryOptions,可选):
|
||||
- `maxRetries`(数字):最大重试次数(默认值:3)
|
||||
- `initialDelay`(数字):初始延迟时间(以毫秒为单位,默认值:1000)
|
||||
- `maxDelay`(数字):最大延迟时间(以毫秒为单位,默认值:30000)
|
||||
- `backoffMultiplier`(数字):退避倍数(默认值:2)
|
||||
|
||||
**返回值:** `Promise<WorkflowExecutionResult>`
|
||||
**返回值:** `Promise<WorkflowExecutionResult | AsyncExecutionResult>`
|
||||
|
||||
重试逻辑使用指数退避(1秒 → 2秒 → 4秒 → 8秒...),并带有 ±25% 的抖动以防止蜂拥效应。如果 API 提供了 `retry-after` 头,则会使用该头。
|
||||
|
||||
##### getRateLimitInfo()
|
||||
|
||||
从上一次 API 响应中获取当前速率限制信息。
|
||||
|
||||
```typescript
|
||||
const rateLimitInfo = client.getRateLimitInfo();
|
||||
if (rateLimitInfo) {
|
||||
console.log('Limit:', rateLimitInfo.limit);
|
||||
console.log('Remaining:', rateLimitInfo.remaining);
|
||||
console.log('Reset:', new Date(rateLimitInfo.reset * 1000));
|
||||
}
|
||||
```
|
||||
|
||||
**返回值:** `RateLimitInfo | null`
|
||||
|
||||
##### getUsageLimits()
|
||||
|
||||
获取您的账户当前的使用限制和配额信息。
|
||||
|
||||
```typescript
|
||||
const limits = await client.getUsageLimits();
|
||||
console.log('Sync requests remaining:', limits.rateLimit.sync.remaining);
|
||||
console.log('Async requests remaining:', limits.rateLimit.async.remaining);
|
||||
console.log('Current period cost:', limits.usage.currentPeriodCost);
|
||||
console.log('Plan:', limits.usage.plan);
|
||||
```
|
||||
|
||||
**返回值:** `Promise<UsageLimits>`
|
||||
|
||||
**响应结构:**
|
||||
|
||||
```typescript
|
||||
{
|
||||
success: boolean
|
||||
rateLimit: {
|
||||
sync: {
|
||||
isLimited: boolean
|
||||
limit: number
|
||||
remaining: number
|
||||
resetAt: string
|
||||
}
|
||||
async: {
|
||||
isLimited: boolean
|
||||
limit: number
|
||||
remaining: number
|
||||
resetAt: string
|
||||
}
|
||||
authType: string // 'api' or 'manual'
|
||||
}
|
||||
usage: {
|
||||
currentPeriodCost: number
|
||||
limit: number
|
||||
plan: string // e.g., 'free', 'pro'
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
##### setApiKey()
|
||||
|
||||
@@ -187,6 +281,20 @@ interface WorkflowExecutionResult {
|
||||
}
|
||||
```
|
||||
|
||||
### AsyncExecutionResult
|
||||
|
||||
```typescript
|
||||
interface AsyncExecutionResult {
|
||||
success: boolean;
|
||||
taskId: string;
|
||||
status: 'queued';
|
||||
createdAt: string;
|
||||
links: {
|
||||
status: string; // e.g., "/api/jobs/{taskId}"
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
### WorkflowStatus
|
||||
|
||||
```typescript
|
||||
@@ -198,6 +306,45 @@ interface WorkflowStatus {
|
||||
}
|
||||
```
|
||||
|
||||
### RateLimitInfo
|
||||
|
||||
```typescript
|
||||
interface RateLimitInfo {
|
||||
limit: number;
|
||||
remaining: number;
|
||||
reset: number;
|
||||
retryAfter?: number;
|
||||
}
|
||||
```
|
||||
|
||||
### UsageLimits
|
||||
|
||||
```typescript
|
||||
interface UsageLimits {
|
||||
success: boolean;
|
||||
rateLimit: {
|
||||
sync: {
|
||||
isLimited: boolean;
|
||||
limit: number;
|
||||
remaining: number;
|
||||
resetAt: string;
|
||||
};
|
||||
async: {
|
||||
isLimited: boolean;
|
||||
limit: number;
|
||||
remaining: number;
|
||||
resetAt: string;
|
||||
};
|
||||
authType: string;
|
||||
};
|
||||
usage: {
|
||||
currentPeriodCost: number;
|
||||
limit: number;
|
||||
plan: string;
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
### SimStudioError
|
||||
|
||||
```typescript
|
||||
@@ -207,6 +354,13 @@ class SimStudioError extends Error {
|
||||
}
|
||||
```
|
||||
|
||||
**常见错误代码:**
|
||||
- `UNAUTHORIZED`: 无效的 API 密钥
|
||||
- `TIMEOUT`: 请求超时
|
||||
- `RATE_LIMIT_EXCEEDED`: 超出速率限制
|
||||
- `USAGE_LIMIT_EXCEEDED`: 超出使用限制
|
||||
- `EXECUTION_ERROR`: 工作流执行失败
|
||||
|
||||
## 示例
|
||||
|
||||
### 基本工作流执行
|
||||
@@ -230,7 +384,7 @@ class SimStudioError extends Error {
|
||||
import { SimStudioClient } from 'simstudio-ts-sdk';
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.SIMSTUDIO_API_KEY!
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
async function runWorkflow() {
|
||||
@@ -271,7 +425,7 @@ runWorkflow();
|
||||
import { SimStudioClient, SimStudioError } from 'simstudio-ts-sdk';
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.SIMSTUDIO_API_KEY!
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
async function executeWithErrorHandling() {
|
||||
@@ -315,14 +469,14 @@ async function executeWithErrorHandling() {
|
||||
import { SimStudioClient } from 'simstudio-ts-sdk';
|
||||
|
||||
// Development configuration
|
||||
const apiKey = process.env.SIMSTUDIO_API_KEY;
|
||||
const apiKey = process.env.SIM_API_KEY;
|
||||
if (!apiKey) {
|
||||
throw new Error('SIMSTUDIO_API_KEY environment variable is required');
|
||||
throw new Error('SIM_API_KEY environment variable is required');
|
||||
}
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey,
|
||||
baseUrl: process.env.SIMSTUDIO_BASE_URL // optional
|
||||
baseUrl: process.env.SIM_BASE_URL // optional
|
||||
});
|
||||
```
|
||||
|
||||
@@ -333,14 +487,14 @@ async function executeWithErrorHandling() {
|
||||
import { SimStudioClient } from 'simstudio-ts-sdk';
|
||||
|
||||
// Production configuration with validation
|
||||
const apiKey = process.env.SIMSTUDIO_API_KEY;
|
||||
const apiKey = process.env.SIM_API_KEY;
|
||||
if (!apiKey) {
|
||||
throw new Error('SIMSTUDIO_API_KEY environment variable is required');
|
||||
throw new Error('SIM_API_KEY environment variable is required');
|
||||
}
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey,
|
||||
baseUrl: process.env.SIMSTUDIO_BASE_URL || 'https://sim.ai'
|
||||
baseUrl: process.env.SIM_BASE_URL || 'https://sim.ai'
|
||||
});
|
||||
```
|
||||
|
||||
@@ -357,7 +511,7 @@ import { SimStudioClient } from 'simstudio-ts-sdk';
|
||||
|
||||
const app = express();
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.SIMSTUDIO_API_KEY!
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
app.use(express.json());
|
||||
@@ -399,7 +553,7 @@ import { NextApiRequest, NextApiResponse } from 'next';
|
||||
import { SimStudioClient } from 'simstudio-ts-sdk';
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.SIMSTUDIO_API_KEY!
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
export default async function handler(
|
||||
@@ -469,14 +623,14 @@ document.getElementById('executeBtn')?.addEventListener('click', executeClientSi
|
||||
|
||||
### React Hook 示例
|
||||
|
||||
为工作流执行创建一个自定义 React Hook:
|
||||
为工作流执行创建自定义 React hook:
|
||||
|
||||
```typescript
|
||||
import { useState, useCallback } from 'react';
|
||||
import { SimStudioClient, WorkflowExecutionResult } from 'simstudio-ts-sdk';
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.NEXT_PUBLIC_SIMSTUDIO_API_KEY!
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
interface UseWorkflowResult {
|
||||
@@ -532,7 +686,7 @@ function WorkflowComponent() {
|
||||
<button onClick={handleExecute} disabled={loading}>
|
||||
{loading ? 'Executing...' : 'Execute Workflow'}
|
||||
</button>
|
||||
|
||||
|
||||
{error && <div>Error: {error.message}</div>}
|
||||
{result && (
|
||||
<div>
|
||||
@@ -545,38 +699,267 @@ function WorkflowComponent() {
|
||||
}
|
||||
```
|
||||
|
||||
## 获取您的 API 密钥
|
||||
### 异步工作流执行
|
||||
|
||||
为长时间运行的任务异步执行工作流:
|
||||
|
||||
```typescript
|
||||
import { SimStudioClient, AsyncExecutionResult } from 'simstudio-ts-sdk';
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
async function executeAsync() {
|
||||
try {
|
||||
// Start async execution
|
||||
const result = await client.executeWorkflow('workflow-id', {
|
||||
input: { data: 'large dataset' },
|
||||
async: true // Execute asynchronously
|
||||
});
|
||||
|
||||
// Check if result is an async execution
|
||||
if ('taskId' in result) {
|
||||
console.log('Task ID:', result.taskId);
|
||||
console.log('Status endpoint:', result.links.status);
|
||||
|
||||
// Poll for completion
|
||||
let status = await client.getJobStatus(result.taskId);
|
||||
|
||||
while (status.status === 'queued' || status.status === 'processing') {
|
||||
console.log('Current status:', status.status);
|
||||
await new Promise(resolve => setTimeout(resolve, 2000)); // Wait 2 seconds
|
||||
status = await client.getJobStatus(result.taskId);
|
||||
}
|
||||
|
||||
if (status.status === 'completed') {
|
||||
console.log('Workflow completed!');
|
||||
console.log('Output:', status.output);
|
||||
console.log('Duration:', status.metadata.duration);
|
||||
} else {
|
||||
console.error('Workflow failed:', status.error);
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
console.error('Error:', error);
|
||||
}
|
||||
}
|
||||
|
||||
executeAsync();
|
||||
```
|
||||
|
||||
### 速率限制和重试
|
||||
|
||||
通过指数退避自动处理速率限制:
|
||||
|
||||
```typescript
|
||||
import { SimStudioClient, SimStudioError } from 'simstudio-ts-sdk';
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
async function executeWithRetryHandling() {
|
||||
try {
|
||||
// Automatically retries on rate limit
|
||||
const result = await client.executeWithRetry('workflow-id', {
|
||||
input: { message: 'Process this' }
|
||||
}, {
|
||||
maxRetries: 5,
|
||||
initialDelay: 1000,
|
||||
maxDelay: 60000,
|
||||
backoffMultiplier: 2
|
||||
});
|
||||
|
||||
console.log('Success:', result);
|
||||
} catch (error) {
|
||||
if (error instanceof SimStudioError && error.code === 'RATE_LIMIT_EXCEEDED') {
|
||||
console.error('Rate limit exceeded after all retries');
|
||||
|
||||
// Check rate limit info
|
||||
const rateLimitInfo = client.getRateLimitInfo();
|
||||
if (rateLimitInfo) {
|
||||
console.log('Rate limit resets at:', new Date(rateLimitInfo.reset * 1000));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 使用监控
|
||||
|
||||
监控您的账户使用情况和限制:
|
||||
|
||||
```typescript
|
||||
import { SimStudioClient } from 'simstudio-ts-sdk';
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
async function checkUsage() {
|
||||
try {
|
||||
const limits = await client.getUsageLimits();
|
||||
|
||||
console.log('=== Rate Limits ===');
|
||||
console.log('Sync requests:');
|
||||
console.log(' Limit:', limits.rateLimit.sync.limit);
|
||||
console.log(' Remaining:', limits.rateLimit.sync.remaining);
|
||||
console.log(' Resets at:', limits.rateLimit.sync.resetAt);
|
||||
console.log(' Is limited:', limits.rateLimit.sync.isLimited);
|
||||
|
||||
console.log('\nAsync requests:');
|
||||
console.log(' Limit:', limits.rateLimit.async.limit);
|
||||
console.log(' Remaining:', limits.rateLimit.async.remaining);
|
||||
console.log(' Resets at:', limits.rateLimit.async.resetAt);
|
||||
console.log(' Is limited:', limits.rateLimit.async.isLimited);
|
||||
|
||||
console.log('\n=== Usage ===');
|
||||
    console.log('Current period cost:', `$${limits.usage.currentPeriodCost.toFixed(2)}`);
    console.log('Limit:', `$${limits.usage.limit.toFixed(2)}`);
    console.log('Plan:', limits.usage.plan);

    const percentUsed = (limits.usage.currentPeriodCost / limits.usage.limit) * 100;
    console.log(`Usage: ${percentUsed.toFixed(1)}%`);

    if (percentUsed > 80) {
      console.log('⚠️ Warning: You are approaching your usage limit!');
    }
  } catch (error) {
    console.error('Error checking usage:', error);
  }
}

checkUsage();
```
### Streaming Workflow Execution
|
||||
|
||||
Execute workflows with real-time streaming responses:
|
||||
|
||||
```typescript
|
||||
import { SimStudioClient } from 'simstudio-ts-sdk';
|
||||
|
||||
const client = new SimStudioClient({
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
async function executeWithStreaming() {
|
||||
try {
|
||||
// 为特定的块输出启用流式传输
|
||||
const result = await client.executeWorkflow('workflow-id', {
|
||||
input: { message: 'Count to five' },
|
||||
stream: true,
|
||||
selectedOutputs: ['agent1.content'] // 使用 blockName.attribute 格式
|
||||
});
|
||||
|
||||
console.log('工作流结果:', result);
|
||||
} catch (error) {
|
||||
console.error('错误:', error);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
The streaming response follows the Server-Sent Events (SSE) format:
|
||||
|
||||
```
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":"One"}
|
||||
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", two"}
|
||||
|
||||
data: {"event":"done","success":true,"output":{},"metadata":{"duration":610}}
|
||||
|
||||
data: [DONE]
|
||||
```
|
||||
|
||||
**React Streaming Example:**
|
||||
|
||||
```typescript
|
||||
import { useState, useEffect } from 'react';
|
||||
|
||||
function StreamingWorkflow() {
|
||||
const [output, setOutput] = useState('');
|
||||
const [loading, setLoading] = useState(false);
|
||||
|
||||
const executeStreaming = async () => {
|
||||
setLoading(true);
|
||||
setOutput('');
|
||||
|
||||
// 重要提示:请从您的后端服务器发起此 API 调用,而不是从浏览器发起
|
||||
// 切勿在客户端代码中暴露您的 API 密钥
|
||||
const response = await fetch('https://sim.ai/api/workflows/WORKFLOW_ID/execute', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'X-API-Key': process.env.SIM_API_KEY! // 仅限服务器端环境变量
|
||||
},
|
||||
body: JSON.stringify({
|
||||
message: '生成一个故事',
|
||||
stream: true,
|
||||
selectedOutputs: ['agent1.content']
|
||||
})
|
||||
});
|
||||
|
||||
const reader = response.body?.getReader();
|
||||
const decoder = new TextDecoder();
|
||||
|
||||
while (reader) {
|
||||
const { done, value } = await reader.read();
|
||||
if (done) break;
|
||||
|
||||
const chunk = decoder.decode(value);
|
||||
const lines = chunk.split('\n\n');
|
||||
|
||||
for (const line of lines) {
|
||||
if (line.startsWith('data: ')) {
|
||||
const data = line.slice(6);
|
||||
if (data === '[DONE]') {
|
||||
setLoading(false);
|
||||
break;
|
||||
}
|
||||
|
||||
try {
|
||||
const parsed = JSON.parse(data);
|
||||
if (parsed.chunk) {
|
||||
setOutput(prev => prev + parsed.chunk);
|
||||
} else if (parsed.event === 'done') {
|
||||
console.log('执行完成:', parsed.metadata);
|
||||
}
|
||||
} catch (e) {
|
||||
// 跳过无效的 JSON
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
return (
|
||||
<div>
|
||||
<button onClick={executeStreaming} disabled={loading}>
|
||||
{loading ? '生成中...' : '开始流式处理'}
|
||||
</button>
|
||||
<div style={{ whiteSpace: 'pre-wrap' }}>{output}</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
```
|
||||
|
||||
## Getting Your API Key
|
||||
|
||||
<Steps>
|
||||
<Step title="登录 Sim">
|
||||
访问 [Sim](https://sim.ai) 并登录您的账户。
|
||||
<Step title="Log in to Sim">
|
||||
Navigate to [Sim](https://sim.ai) and log in to your account.
|
||||
</Step>
|
||||
<Step title="打开您的工作流">
|
||||
导航到您想要以编程方式执行的工作流。
|
||||
<Step title="Open your workflow">
|
||||
Navigate to the workflow you want to execute programmatically.
|
||||
</Step>
|
||||
<Step title="部署您的工作流">
|
||||
如果尚未部署,请点击“部署”以部署您的工作流。
|
||||
<Step title="Deploy your workflow">
|
||||
Click on "Deploy" to deploy your workflow if it hasn't been deployed yet.
|
||||
</Step>
|
||||
<Step title="创建或选择一个 API 密钥">
|
||||
在部署过程中,选择或创建一个 API 密钥。
|
||||
<Step title="Create or select an API key">
|
||||
During the deployment process, select or create an API key.
|
||||
</Step>
|
||||
<Step title="复制 API 密钥">
|
||||
复制 API 密钥以在您的 TypeScript/JavaScript 应用程序中使用。
|
||||
<Step title="Copy the API key">
|
||||
Copy the API key to use in your TypeScript/JavaScript application.
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
<Callout type="warning">
|
||||
请确保您的 API 密钥安全,切勿将其提交到版本控制中。使用环境变量或安全配置管理。
|
||||
Keep your API key secure and never commit it to version control. Use environment variables or secure configuration management.
|
||||
</Callout>
|
||||
|
||||
## 要求
|
||||
## Requirements
|
||||
|
||||
- Node.js 16+
|
||||
- TypeScript 5.0+(适用于 TypeScript 项目)
|
||||
- TypeScript 5.0+ (for TypeScript projects)
|
||||
|
||||
## TypeScript 支持
|
||||
## TypeScript Support
|
||||
|
||||
SDK 是用 TypeScript 编写的,并提供完整的类型安全:
|
||||
The SDK is written in TypeScript and provides full type safety:
|
||||
|
||||
```typescript
|
||||
import {
|
||||
@@ -586,22 +969,22 @@ import {
|
||||
SimStudioError
|
||||
} from 'simstudio-ts-sdk';
|
||||
|
||||
// Type-safe client initialization
|
||||
// 类型安全的客户端初始化
|
||||
const client: SimStudioClient = new SimStudioClient({
|
||||
apiKey: process.env.SIMSTUDIO_API_KEY!
|
||||
apiKey: process.env.SIM_API_KEY!
|
||||
});
|
||||
|
||||
// Type-safe workflow execution
|
||||
// 类型安全的工作流执行
|
||||
const result: WorkflowExecutionResult = await client.executeWorkflow('workflow-id', {
|
||||
input: {
|
||||
message: 'Hello, TypeScript!'
|
||||
message: '你好,TypeScript!'
|
||||
}
|
||||
});
|
||||
|
||||
// Type-safe status checking
|
||||
// 类型安全的状态检查
|
||||
const status: WorkflowStatus = await client.getWorkflowStatus('workflow-id');
|
||||
```
|
||||
|
||||
## 许可证
|
||||
|
||||
Apache-2.0
|
||||
Apache-2.0
|
||||
|
||||
@@ -38,6 +38,84 @@ curl -X POST \
|
||||
|
||||
成功的响应会返回来自执行器的序列化执行结果。错误会显示验证、认证或工作流失败的信息。
|
||||
|
||||
## 流式响应
|
||||
|
||||
启用实时流式传输以在生成时逐字符接收工作流输出。这对于向用户逐步显示 AI 响应非常有用。
|
||||
|
||||
### 请求参数
|
||||
|
||||
添加以下参数以启用流式传输:
|
||||
|
||||
- `stream` - 设置为 `true` 以启用服务器发送事件 (SSE) 流式传输
|
||||
- `selectedOutputs` - 要流式传输的块输出数组(例如,`["agent1.content"]`)
|
||||
|
||||
### 块输出格式
|
||||
|
||||
使用 `blockName.attribute` 格式指定要流式传输的块输出:
|
||||
- 格式:`"blockName.attribute"`(例如,如果您想流式传输 Agent 1 块的内容,可以使用 `"agent1.content"`)
|
||||
- 块名称不区分大小写,空格会被忽略
|
||||
|
||||
### 示例请求
|
||||
|
||||
```bash
|
||||
curl -X POST \
|
||||
https://sim.ai/api/workflows/WORKFLOW_ID/execute \
|
||||
-H 'Content-Type: application/json' \
|
||||
-H 'X-API-Key: YOUR_KEY' \
|
||||
-d '{
|
||||
"message": "Count to five",
|
||||
"stream": true,
|
||||
"selectedOutputs": ["agent1.content"]
|
||||
}'
|
||||
```
|
||||
|
||||
### 响应格式
|
||||
|
||||
流式响应使用服务器发送事件 (SSE) 格式:
|
||||
|
||||
```
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":"One"}
|
||||
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", two"}
|
||||
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", three"}
|
||||
|
||||
data: {"event":"done","success":true,"output":{},"metadata":{"duration":610}}
|
||||
|
||||
data: [DONE]
|
||||
```
|
||||
|
||||
每个事件包括:
|
||||
- **流式块**:`{"blockId": "...", "chunk": "text"}` - 实时生成的文本
|
||||
- **最终事件**:`{"event": "done", ...}` - 执行元数据和完整结果
|
||||
- **终止符**:`[DONE]` - 表示流结束
|
||||
|
||||
### 多块流式传输
|
||||
|
||||
当 `selectedOutputs` 包含多个块时,每个块会指示其来源:
|
||||
|
||||
```bash
|
||||
curl -X POST \
|
||||
https://sim.ai/api/workflows/WORKFLOW_ID/execute \
|
||||
-H 'Content-Type: application/json' \
|
||||
-H 'X-API-Key: YOUR_KEY' \
|
||||
-d '{
|
||||
"message": "Process this request",
|
||||
"stream": true,
|
||||
"selectedOutputs": ["agent1.content", "agent2.content"]
|
||||
}'
|
||||
```
|
||||
|
||||
每个块中的 `blockId` 字段可让您将输出路由到正确的 UI 元素:
|
||||
|
||||
```
|
||||
data: {"blockId":"agent1-uuid","chunk":"Processing..."}
|
||||
|
||||
data: {"blockId":"agent2-uuid","chunk":"Analyzing..."}
|
||||
|
||||
data: {"blockId":"agent1-uuid","chunk":" complete"}
|
||||
```
|
||||
|
||||
## 输出参考
|
||||
|
||||
| 参考 | 描述 |
|
||||
@@ -48,5 +126,5 @@ curl -X POST \
|
||||
如果未定义输入格式,执行器仅在 `<api.input>` 处暴露原始 JSON。
|
||||
|
||||
<Callout type="warning">
|
||||
一个工作流只能包含一个 API 触发器。更改后发布新的部署,以确保端点保持最新。
|
||||
一个工作流只能包含一个 API 触发器。更改后发布新部署,以确保端点保持最新。
|
||||
</Callout>
|
||||
|
||||
@@ -2231,8 +2231,8 @@ checksums:
|
||||
d394ac42b56429e524dc5a771b0610b9:
|
||||
meta/title: 9da9098244c6c7a0ebcc3009cef66c7b
|
||||
content/0: 9218a2e190598690d0fc5c27c30f01bb
|
||||
content/1: 8a3feb937915c3191f1eecb10b94297d
|
||||
content/2: 99af1bfe8d1629acdb5a9229430af791
|
||||
content/1: 6c88f52bdb4e4a5668d1b25b5f444f48
|
||||
content/2: 7e827833339b6b4c6abdf154de7f9a0c
|
||||
content/3: 391128dee61b5d0d43eba88567aaef42
|
||||
content/4: 4d132e6346723ecf45c408afeab2757b
|
||||
content/5: d3df764a69d2926d10aed65ad8693e9f
|
||||
@@ -2254,79 +2254,155 @@ checksums:
|
||||
content/21: bd0e851fdde30c0e94c00b60f85d655e
|
||||
content/22: 837ca74ccf63f23333c54e010faf681c
|
||||
content/23: 8fb33cfc314b86d35df8ea1b10466f20
|
||||
content/24: 09e003c28fb1810e9afefe51324265fd
|
||||
content/25: 07fb2d6b16c75839a32d383f12419ca5
|
||||
content/26: 9fd0cd99a879360d355d91e9cfb41531
|
||||
content/27: f6fed8ebf67ba12199b4474a754969ae
|
||||
content/28: bcee3febe1be079e53aea841e2b08b3b
|
||||
content/29: f00be560fcd4ff3f53d61c70c249597b
|
||||
content/30: fa4fa1573c369fcc2eee57d7852caf9c
|
||||
content/31: fa68c1f8c9ea3dba96b2ea7edb8680d7
|
||||
content/32: 304e608d459ef53f308e6ea1f6f8b54a
|
||||
content/33: cb63e267fb16a7aaeea45c4ca29bf697
|
||||
content/34: f00be560fcd4ff3f53d61c70c249597b
|
||||
content/35: f7c266db4d07d040f8f788be598476cf
|
||||
content/36: cd306281b5136831335e6376edb1e822
|
||||
content/37: afb7b7f27d48deb3154da26135e17fb8
|
||||
content/38: 179000198c9cd78601b5c862e9c8659f
|
||||
content/39: 28c37db52f39323e125fcaf0e60911db
|
||||
content/40: 8de5041c3c93b70619ec1723f657757f
|
||||
content/41: 07fb2d6b16c75839a32d383f12419ca5
|
||||
content/42: 65db7855a79ab283c6409e81a7703d19
|
||||
content/43: 191fb7087315702a36001c69d745ebed
|
||||
content/44: f6113edfd7a0062af4d88bcf31a73f45
|
||||
content/45: 1e84fc1eee794c20e3411b3a34a02278
|
||||
content/46: ec31e300f79185f734d32b1cfaf8a137
|
||||
content/47: f7ad301d02e8826921644a5268f13f32
|
||||
content/48: 025d60fdaf93713ccb34abcbc71dfa2b
|
||||
content/49: 70a9ece41fdad09f3a06ca0efdb92ae9
|
||||
content/50: 356d67409ae0d82a72d052573314f660
|
||||
content/51: bb172f1678686d9d49666c516716de24
|
||||
content/52: 529711647eccfdf031dbb5bc70581986
|
||||
content/53: 9a84c92505eb468916637fcf2cef70f2
|
||||
content/54: 225bca3fb37bd38cd645e8a698abbfa9
|
||||
content/55: 33b9b1e9744318597da4b925b0995be2
|
||||
content/56: 6afe3b62e6d53c3dcd07149abcab4c05
|
||||
content/57: b6363faee219321c16d41a9c3f8d3bdd
|
||||
content/58: 24ef65dd034a2881a978d8d0065fb258
|
||||
content/59: b8b23ab79a7eb32c6f8d5f49f43c51f6
|
||||
content/60: be358297e2bbb9ab4689d11d072611d1
|
||||
content/61: b2eaadc86870d2e64b55c89e0348ef93
|
||||
content/62: 450265802cb0ba5b435b74b9cac1bf23
|
||||
content/63: b735ede8764e4b2dfb25967e33ab5143
|
||||
content/64: 0f881e586a03c4b916456c73fad48358
|
||||
content/65: 62bbbeca4e0500062f5cdbbc1614dde0
|
||||
content/66: 55d47e12745c1b0b62c9bdf6e8449730
|
||||
content/67: 1d873c7ccd87f564e2b30387b40ee9e9
|
||||
content/68: 3304a33dfb626c6e2267c062e8956a9d
|
||||
content/69: 77256b36307e9f7293bd00063239c8ee
|
||||
content/70: ac686382ccbb07d75b0f141af500dfd5
|
||||
content/71: 38f7308105b0843792c8e2fb93e1895d
|
||||
content/72: 62f6977928b2f596ed7d54383d1e779d
|
||||
content/73: 3415d6c5ad1df56b212d69519bdf0fea
|
||||
content/74: d1a104e667cd2284ab5b3fead4a6ba1d
|
||||
content/75: a81d7cd4a644a0061dad3a5973b4fe06
|
||||
content/76: 981447969a71fd038049e9d9f40f4f8c
|
||||
content/77: 531941216d31cb1947367c3c02127baa
|
||||
content/78: bf1afa789fdfa5815faaf43574341e90
|
||||
content/79: 5f2fe55d098d4e4f438af595708b2280
|
||||
content/80: 41b8f7cf8899a0e92e255a3f845f9584
|
||||
content/81: 5040bab65fb6bb77862f8098d16afbb5
|
||||
content/82: a88260a5b5e23da73e4534376adeb193
|
||||
content/83: e5e2329cdc226186fe9d44767528a4a0
|
||||
content/84: 1773624e9ac3d5132b505894ef51977e
|
||||
content/85: d62c9575cc66feec7589fba95c9f7aee
|
||||
content/86: 7af652c5407ae7e156ab27b21a4f26d3
|
||||
content/87: 4aa69b29cca745389dea8cd74eba4f83
|
||||
content/88: 46877074b69519165997fa0968169611
|
||||
content/89: d8ebc69b18baf83689ba315e7b4946ea
|
||||
content/90: ecd571818ddf3d31b08b80a25958a662
|
||||
content/91: 7dcdf2fbf3fce3f94987046506e12a9b
|
||||
content/24: f8fbd9375113651be0f2498bdacde0ef
|
||||
content/25: 2c57d87589b65f785e0fbbda60d32e54
|
||||
content/26: 2541eb37fca67a6d7c5a10f8067127a3
|
||||
content/27: 9fd0cd99a879360d355d91e9cfb41531
|
||||
content/28: f6fed8ebf67ba12199b4474a754969ae
|
||||
content/29: bcee3febe1be079e53aea841e2b08b3b
|
||||
content/30: f00be560fcd4ff3f53d61c70c249597b
|
||||
content/31: fa4fa1573c369fcc2eee57d7852caf9c
|
||||
content/32: fa68c1f8c9ea3dba96b2ea7edb8680d7
|
||||
content/33: 304e608d459ef53f308e6ea1f6f8b54a
|
||||
content/34: cb63e267fb16a7aaeea45c4ca29bf697
|
||||
content/35: f00be560fcd4ff3f53d61c70c249597b
|
||||
content/36: f7c266db4d07d040f8f788be598476cf
|
||||
content/37: d93b320646fde160c0fdd1936ee63cfb
|
||||
content/38: c76e2089a41880dd6feac759ec8867c2
|
||||
content/39: 0d61b9631788e64d1c1335b08c907107
|
||||
content/40: 5ec50e6f56bd0a9a55fae14fa02185d9
|
||||
content/41: 47bdc3ba4908bf1ce3d1a0a8f646b339
|
||||
content/42: 5e8af7125448a6021a6ea431486dd587
|
||||
content/43: 15017685691db74889cc6116373e44a5
|
||||
content/44: 4d4ad5d56e800e5d227a07339300fc7f
|
||||
content/45: c035728b4b81d006a18ba9ba7b9c638d
|
||||
content/46: f1c9ad60574d19a5f93c837ab9d88890
|
||||
content/47: 2c57d87589b65f785e0fbbda60d32e54
|
||||
content/48: e7019a0e12f7295893c5822356fc0df0
|
||||
content/49: 5912d8d9df5bbe435579d8eb0677685c
|
||||
content/50: 4e1da4edce56837c750ce8da4c0e6cf2
|
||||
content/51: 3d35097bb958e6eddd6976aeb1fe9e41
|
||||
content/52: 78dce98d48ba070dbe100ee2a94cb17d
|
||||
content/53: 38ec85acf292485e3dd837a29208fd2c
|
||||
content/54: 58d582d90c8715f5570f76fed2be508d
|
||||
content/55: 7d2b7134d447172c502b5f40fc3b38e6
|
||||
content/56: 4a71171863d7329da6813b94772c0d4e
|
||||
content/57: 1900d5b89dbca22d7a455bdc3367f0f5
|
||||
content/58: 45126feb4fc831922a7edabfa2d54e4a
|
||||
content/59: 65db7855a79ab283c6409e81a7703d19
|
||||
content/60: 191fb7087315702a36001c69d745ebed
|
||||
content/61: f6113edfd7a0062af4d88bcf31a73f45
|
||||
content/62: 1e84fc1eee794c20e3411b3a34a02278
|
||||
content/63: ec31e300f79185f734d32b1cfaf8a137
|
||||
content/64: f7ad301d02e8826921644a5268f13f32
|
||||
content/65: 025d60fdaf93713ccb34abcbc71dfa2b
|
||||
content/66: 70a9ece41fdad09f3a06ca0efdb92ae9
|
||||
content/67: 356d67409ae0d82a72d052573314f660
|
||||
content/68: 5a80933fb21deea17a0a200564f0111b
|
||||
content/69: 9527ba2ab5ddd8001baaaaf25f1a7acc
|
||||
content/70: bb172f1678686d9d49666c516716de24
|
||||
content/71: 529711647eccfdf031dbb5bc70581986
|
||||
content/72: baa408b1603f35a8e24dd60b88773c72
|
||||
content/73: c42a9f19d0678d8d1a36cf1f93e4a5ba
|
||||
content/74: f6180f2341e8a7ae24afb05d7a185340
|
||||
content/75: 8196e101e443ec2aac13cefd90a6d454
|
||||
content/76: 9a84c92505eb468916637fcf2cef70f2
|
||||
content/77: 225bca3fb37bd38cd645e8a698abbfa9
|
||||
content/78: 7431c09b430effd69de843ee0fbaafe8
|
||||
content/79: 33b9b1e9744318597da4b925b0995be2
|
||||
content/80: 6afe3b62e6d53c3dcd07149abcab4c05
|
||||
content/81: b6363faee219321c16d41a9c3f8d3bdd
|
||||
content/82: 2449c8e8f55e2bf3f732527352d35c9f
|
||||
content/83: b8b23ab79a7eb32c6f8d5f49f43c51f6
|
||||
content/84: be358297e2bbb9ab4689d11d072611d1
|
||||
content/85: eb774a8a86d778153905b0f6cdcdf517
|
||||
content/86: 450265802cb0ba5b435b74b9cac1bf23
|
||||
content/87: b735ede8764e4b2dfb25967e33ab5143
|
||||
content/88: 0f881e586a03c4b916456c73fad48358
|
||||
content/89: f51639ab2b7ccac72b850e2064e694e9
|
||||
content/90: 55d47e12745c1b0b62c9bdf6e8449730
|
||||
content/91: e6223d6aa9efa444282e58d7d9a99ced
|
||||
content/92: 3304a33dfb626c6e2267c062e8956a9d
|
||||
content/93: 77256b36307e9f7293bd00063239c8ee
|
||||
content/94: ac686382ccbb07d75b0f141af500dfd5
|
||||
content/95: 5610b6538a29672335b572d6f35d0657
|
||||
content/96: 62f6977928b2f596ed7d54383d1e779d
|
||||
content/97: 3415d6c5ad1df56b212d69519bdf0fea
|
||||
content/98: 6bd60468d8cc072c5fe4214481fa9f60
|
||||
content/99: a81d7cd4a644a0061dad3a5973b4fe06
|
||||
content/100: 981447969a71fd038049e9d9f40f4f8c
|
||||
content/101: 531941216d31cb1947367c3c02127baa
|
||||
content/102: bf1afa789fdfa5815faaf43574341e90
|
||||
content/103: 5f2fe55d098d4e4f438af595708b2280
|
||||
content/104: 41b8f7cf8899a0e92e255a3f845f9584
|
||||
content/105: 61ddd890032078ffd2da931b1d153b6d
|
||||
content/106: 7873aa7487bc3e8a4826d65c1760a4a0
|
||||
content/107: 98182d9aabe14d5bad43a5ee76a75eab
|
||||
content/108: 2bdb01e4bcb08b1d99f192acf8e2fba7
|
||||
content/109: 7079d9c00b1e1882c329b7e9b8f74552
|
||||
content/110: 0f9d65eaf6e8de43c3d5fa7e62bc838d
|
||||
content/111: 58c8e9d2d0ac37efd958203b8fbc8193
|
||||
content/112: 7859d36a7a6d0122c0818b28ee29aa3e
|
||||
content/113: ce185e7b041b8f95ebc11370d3e0aad9
|
||||
content/114: 701e9bf4fd4d0669da0584eac5bd96e0
|
||||
content/115: d1bab8ec5a51a9da5464eb47e2a16b50
|
||||
content/116: da658275cc81a20f9cf7e4c66c7af1e3
|
||||
content/117: 377d7c99a5df4b72166946573f7210b8
|
||||
content/118: 3afc03a5ab1dc9db2bfa092b0ac4826a
|
||||
content/119: 18ddfcaf2be4a6f1d9819407dad9ce7c
|
||||
content/120: 2f6263b2e95f09f7e4842453f4bf4a0a
|
||||
content/121: 4603578d6b314b662f45564a34ca430d
|
||||
content/122: cf4c97eb254d0bd6ea6633344621c2c2
|
||||
content/123: 7b4640989fab002039936156f857eb21
|
||||
content/124: 65ca9f08745b47b4cce8ea8247d043bf
|
||||
content/125: 162b4180611ff0a53b782e4dc8109293
|
||||
content/126: 6b367a189eb53cb198e3666023def89c
|
||||
content/127: dbb2125cefcf618849600c1eccae8a64
|
||||
content/128: 04eedda0da3767b06e6017c559e05414
|
||||
content/129: 661688450606eb09d8faee1468e88331
|
||||
content/130: 8ff8367c3246103b3e3e02499e34ae0b
|
||||
content/131: 44678bda9166f746da1d61b694ced482
|
||||
content/132: a5e75db27c0a901f4cacf6598f450e6c
|
||||
content/133: d1bab8ec5a51a9da5464eb47e2a16b50
|
||||
content/134: da658275cc81a20f9cf7e4c66c7af1e3
|
||||
content/135: 377d7c99a5df4b72166946573f7210b8
|
||||
content/136: 3afc03a5ab1dc9db2bfa092b0ac4826a
|
||||
content/137: 18ddfcaf2be4a6f1d9819407dad9ce7c
|
||||
content/138: 2f6263b2e95f09f7e4842453f4bf4a0a
|
||||
content/139: 4603578d6b314b662f45564a34ca430d
|
||||
content/140: cf4c97eb254d0bd6ea6633344621c2c2
|
||||
content/141: 7b4640989fab002039936156f857eb21
|
||||
content/142: 65ca9f08745b47b4cce8ea8247d043bf
|
||||
content/143: 162b4180611ff0a53b782e4dc8109293
|
||||
content/144: 6b367a189eb53cb198e3666023def89c
|
||||
content/145: dbb2125cefcf618849600c1eccae8a64
|
||||
content/146: 04eedda0da3767b06e6017c559e05414
|
||||
content/147: 661688450606eb09d8faee1468e88331
|
||||
content/148: 8ff8367c3246103b3e3e02499e34ae0b
|
||||
content/149: 44678bda9166f746da1d61b694ced482
|
||||
content/150: 192a89879084dd7a74a6f44bcecae958
|
||||
content/151: 41c2bb95317d7c0421817a2b1a68cc09
|
||||
content/152: 4c95f9fa55f698f220577380dff95011
|
||||
content/153: 9ef273d776aada1b2cff3452f08ff985
|
||||
content/154: 100e12673551d4ceb5b906b1b9c65059
|
||||
content/155: ce253674cd7c49320203cda2bdd3685b
|
||||
content/156: 8910afcea8c205a28256eb30de6a1f26
|
||||
content/157: 4d7ad757d2c70fdff7834146d38dddd8
|
||||
content/158: a88260a5b5e23da73e4534376adeb193
|
||||
content/159: e5e2329cdc226186fe9d44767528a4a0
|
||||
content/160: 1773624e9ac3d5132b505894ef51977e
|
||||
content/161: d62c9575cc66feec7589fba95c9f7aee
|
||||
content/162: 7af652c5407ae7e156ab27b21a4f26d3
|
||||
content/163: 4aa69b29cca745389dea8cd74eba4f83
|
||||
content/164: 46877074b69519165997fa0968169611
|
||||
content/165: 2e81908c18033109ac82a054b3fafd3d
|
||||
content/166: ecd571818ddf3d31b08b80a25958a662
|
||||
content/167: 7dcdf2fbf3fce3f94987046506e12a9b
|
||||
27578f1315b6f1b7418d5e0d6042722e:
|
||||
meta/title: 8c555594662512e95f28e20d3880f186
|
||||
content/0: 9218a2e190598690d0fc5c27c30f01bb
|
||||
content/1: feca29d7cbb17f461bc8706f142cb475
|
||||
content/2: 65705e1bef9ddf2674454c20e77af61f
|
||||
content/2: 9cb58e08402fc80050ad6a62cae3f643
|
||||
content/3: 391128dee61b5d0d43eba88567aaef42
|
||||
content/4: fa77bab0a8660a7999bf3104921aac5c
|
||||
content/5: e8839cfb872185cea76973caaa7f84e0
|
||||
@@ -2342,67 +2418,107 @@ checksums:
|
||||
content/15: 64005abb7b5c1c3edef8970a8a7d17b2
|
||||
content/16: 837ca74ccf63f23333c54e010faf681c
|
||||
content/17: 626054376e08522e7195a60c34db9af8
|
||||
content/18: 03c715df3c784e92ce1c0ce6a4dcd2e3
|
||||
content/19: dcb92b9a1f222393f2e81cdae239885c
|
||||
content/20: 2f5c7e73763a1884893739283f0d0659
|
||||
content/21: f6fed8ebf67ba12199b4474a754969ae
|
||||
content/22: c8f9a1d43885f2b9fe8b64c79d8af8b8
|
||||
content/23: e1a2ca39583549a731d942082e1fa07c
|
||||
content/24: 14e077bdb64d87457870efa215384654
|
||||
content/25: c2e86eaf4b7d1cd53ed8172264337cc9
|
||||
content/26: 304e608d459ef53f308e6ea1f6f8b54a
|
||||
content/27: 9d04294f8385211535ed7622d164871f
|
||||
content/28: e1a2ca39583549a731d942082e1fa07c
|
||||
content/29: 279c20e11af33abb94993e8ea3e80669
|
||||
content/30: eec7d8395f8cf305106deb7b25384ecf
|
||||
content/31: 921824b44c391f8a0cdc5ce4cd283e77
|
||||
content/32: d5aaccb9399a1255f986b703921594e5
|
||||
content/33: dba855cc28255e4576026e3da0cdf05b
|
||||
content/34: 17fdd93c6df75b108e352a62a195bc73
|
||||
content/35: dcb92b9a1f222393f2e81cdae239885c
|
||||
content/36: fb6fddfdf4753a36c7878ef60b345822
|
||||
content/37: 191fb7087315702a36001c69d745ebed
|
||||
content/38: 1ffef0a4e0d6a6bbca85776c113e1164
|
||||
content/39: 61caafaf79e863df9525c4baf72c14e1
|
||||
content/40: ec31e300f79185f734d32b1cfaf8a137
|
||||
content/41: 65a172d64ffca3b03c6e0ed08f0bd821
|
||||
content/42: 2db387754d7fb3539bcb986dfaac1c8c
|
||||
content/43: e118d997ba48a5230ec70a564d436860
|
||||
content/44: 77268362a748dafad471f31acfd230dc
|
||||
content/45: b55b3773df2dfba66b6e675db7e2470e
|
||||
content/46: 70a9ece41fdad09f3a06ca0efdb92ae9
|
||||
content/47: 646ee615d86faf3b6a8da03115a30efa
|
||||
content/48: bb172f1678686d9d49666c516716de24
|
||||
content/49: a025b3b746d72e0f676f58703ee19a47
|
||||
content/50: 9a84c92505eb468916637fcf2cef70f2
|
||||
content/51: a4c78d85ed9be63b07b657166510f440
|
||||
content/52: 33b9b1e9744318597da4b925b0995be2
|
||||
content/53: 6afe3b62e6d53c3dcd07149abcab4c05
|
||||
content/54: b6363faee219321c16d41a9c3f8d3bdd
|
||||
content/55: f939bc99e05d04e1d52bf4b9ec3f1825
|
||||
content/56: b8b23ab79a7eb32c6f8d5f49f43c51f6
|
||||
content/57: be358297e2bbb9ab4689d11d072611d1
|
||||
content/58: d8fcefba15a99bf4a9cf71c985097677
|
||||
content/59: 7d098f0349c782f389431377ee512e92
|
||||
content/60: 22b39537f6a104803389469d211154e4
|
||||
content/61: 5dc147f9fe5e8117dfa6c94808c4ff54
|
||||
content/62: f29d6bfd74ba3fee0b90180f620b4f47
|
||||
content/63: 2a59466500b62e57481fe27692a3ed0f
|
||||
content/64: d3ac9ea2a213cafb1f871dda8f6e6fe0
|
||||
content/65: 450265802cb0ba5b435b74b9cac1bf23
|
||||
content/66: b735ede8764e4b2dfb25967e33ab5143
|
||||
content/67: 0f881e586a03c4b916456c73fad48358
|
||||
content/68: 3f643fb43f3a022a449ded1e7c4db8bf
|
||||
content/69: 55d47e12745c1b0b62c9bdf6e8449730
|
||||
content/70: 166b3975e39841707381880ae4df3984
|
||||
content/71: 3304a33dfb626c6e2267c062e8956a9d
|
||||
content/72: a88260a5b5e23da73e4534376adeb193
|
||||
content/73: cc31ae653c5642b223ec634888de29c6
|
||||
content/74: 1773624e9ac3d5132b505894ef51977e
|
||||
content/75: d62c9575cc66feec7589fba95c9f7aee
|
||||
content/76: 8df5939abc771b5d24c115ef20d42d6f
|
||||
content/77: ecd571818ddf3d31b08b80a25958a662
|
||||
content/78: 7dcdf2fbf3fce3f94987046506e12a9b
|
||||
content/18: 12153919e0229ac0a3699de043eae2a2
|
||||
content/19: 59ceca96004d0746448717245eb65c5c
|
||||
content/20: a0ff152e09498effe90572fe5cdfad1b
|
||||
content/21: 2f5c7e73763a1884893739283f0d0659
|
||||
content/22: f6fed8ebf67ba12199b4474a754969ae
|
||||
content/23: c8f9a1d43885f2b9fe8b64c79d8af8b8
|
||||
content/24: e1a2ca39583549a731d942082e1fa07c
|
||||
content/25: 14e077bdb64d87457870efa215384654
|
||||
content/26: c2e86eaf4b7d1cd53ed8172264337cc9
|
||||
content/27: 304e608d459ef53f308e6ea1f6f8b54a
|
||||
content/28: 9d04294f8385211535ed7622d164871f
|
||||
content/29: e1a2ca39583549a731d942082e1fa07c
|
||||
content/30: 279c20e11af33abb94993e8ea3e80669
|
||||
content/31: 9e772c161a4b008c2f1db15a967d07ab
|
||||
content/32: c76e2089a41880dd6feac759ec8867c2
|
||||
content/33: 5d9a7b1e681cbe8f02def7eefabb0ac5
|
||||
content/34: b4e0e90d40a60a024f64f80b193dcb48
|
||||
content/35: b9f46c03c91c1070dd3ca0eba461f29b
|
||||
content/36: fbecf63d14b56039ba44471f7a8afd4a
|
||||
content/37: 58701f4ec097582ee105714a9363ccbe
|
||||
content/38: 4d4ad5d56e800e5d227a07339300fc7f
|
||||
content/39: 7f2a42a752279d7871064a21d0891b73
|
||||
content/40: 8462e2271506b0545c62e5f70865a2f4
|
||||
content/41: 59ceca96004d0746448717245eb65c5c
|
||||
content/42: e7019a0e12f7295893c5822356fc0df0
|
||||
content/43: 29d376146cd1149025028c61eb33e7ab
|
||||
content/44: 4e1da4edce56837c750ce8da4c0e6cf2
|
||||
content/45: 666a62d9fd54735b2adcad6277b3e07f
|
||||
content/46: db012cfc3749d025f1dd40b5db1d9d63
|
||||
content/47: 478fe7c3fbdd5e7d779691c9a09795c9
|
||||
content/48: 58d582d90c8715f5570f76fed2be508d
|
||||
content/49: 710baf5cf18c21cc284e70df97b36f40
|
||||
content/50: 6363bbb118f3f51ca1b1acf3e9ec2f7c
|
||||
content/51: 1900d5b89dbca22d7a455bdc3367f0f5
|
||||
content/52: 959f29f44825109bf4bb16129896a8dd
|
||||
content/53: fb6fddfdf4753a36c7878ef60b345822
|
||||
content/54: 191fb7087315702a36001c69d745ebed
|
||||
content/55: 1ffef0a4e0d6a6bbca85776c113e1164
|
||||
content/56: 61caafaf79e863df9525c4baf72c14e1
|
||||
content/57: ec31e300f79185f734d32b1cfaf8a137
|
||||
content/58: 65a172d64ffca3b03c6e0ed08f0bd821
|
||||
content/59: 2db387754d7fb3539bcb986dfaac1c8c
|
||||
content/60: e118d997ba48a5230ec70a564d436860
|
||||
content/61: 77268362a748dafad471f31acfd230dc
|
||||
content/62: b55b3773df2dfba66b6e675db7e2470e
|
||||
content/63: 70a9ece41fdad09f3a06ca0efdb92ae9
|
||||
content/64: 646ee615d86faf3b6a8da03115a30efa
|
||||
content/65: 5a80933fb21deea17a0a200564f0111b
|
||||
content/66: a82d5e5fad0fbfd60ca97e5312d11941
|
||||
content/67: bb172f1678686d9d49666c516716de24
|
||||
content/68: a025b3b746d72e0f676f58703ee19a47
|
||||
content/69: baa408b1603f35a8e24dd60b88773c72
|
||||
content/70: c0cc113d0001826984f9c096c79cd18b
|
||||
content/71: f6180f2341e8a7ae24afb05d7a185340
|
||||
content/72: 3d414a5669f152cd296af27b61104858
|
||||
content/73: 9a84c92505eb468916637fcf2cef70f2
|
||||
content/74: a4c78d85ed9be63b07b657166510f440
|
||||
content/75: 7431c09b430effd69de843ee0fbaafe8
|
||||
content/76: 33b9b1e9744318597da4b925b0995be2
|
||||
content/77: 6afe3b62e6d53c3dcd07149abcab4c05
|
||||
content/78: b6363faee219321c16d41a9c3f8d3bdd
|
||||
content/79: 08410ce9f0ec358b3c7230a56bc66399
|
||||
content/80: b8b23ab79a7eb32c6f8d5f49f43c51f6
|
||||
content/81: be358297e2bbb9ab4689d11d072611d1
|
||||
content/82: 09fea7c0d742a0eefa77e982e848de6c
|
||||
content/83: 7d098f0349c782f389431377ee512e92
|
||||
content/84: 22b39537f6a104803389469d211154e4
|
||||
content/85: d9ec74ab28b264d76f797fdae7c8f3d3
|
||||
content/86: f29d6bfd74ba3fee0b90180f620b4f47
|
||||
content/87: 2a59466500b62e57481fe27692a3ed0f
|
||||
content/88: cbbb123fc3a12bf2ab72dc1bbe373a6e
|
||||
content/89: 7873aa7487bc3e8a4826d65c1760a4a0
|
||||
content/90: 98182d9aabe14d5bad43a5ee76a75eab
|
||||
content/91: 67bfa8ae3e22d9a949f08c79a40b8df5
|
||||
content/92: 7079d9c00b1e1882c329b7e9b8f74552
|
||||
content/93: 0f9d65eaf6e8de43c3d5fa7e62bc838d
|
||||
content/94: bcf0ce93a4493586ad32c20d9d2b285c
|
||||
content/95: 7859d36a7a6d0122c0818b28ee29aa3e
|
||||
content/96: ce185e7b041b8f95ebc11370d3e0aad9
|
||||
content/97: dae96b41f0c029b464f02ac65d3c5796
|
||||
content/98: 41c2bb95317d7c0421817a2b1a68cc09
|
||||
content/99: 4c95f9fa55f698f220577380dff95011
|
||||
content/100: 6695bd47a05f9963134d8a71abb3d298
|
||||
content/101: 100e12673551d4ceb5b906b1b9c65059
|
||||
content/102: ce253674cd7c49320203cda2bdd3685b
|
||||
content/103: 94d4346a735149c2a83f6d2a21b8ab4c
|
||||
content/104: 3ee4b16b8204ef3b5b7c0322ff636fab
|
||||
content/105: 450265802cb0ba5b435b74b9cac1bf23
|
||||
content/106: b735ede8764e4b2dfb25967e33ab5143
|
||||
content/107: 0f881e586a03c4b916456c73fad48358
|
||||
content/108: 4570af52d41ecda8d91e6bbe2bc19891
|
||||
content/109: 55d47e12745c1b0b62c9bdf6e8449730
|
||||
content/110: 82507d357ec8766f0173b9b1081c4c56
|
||||
content/111: 3304a33dfb626c6e2267c062e8956a9d
|
||||
content/112: a88260a5b5e23da73e4534376adeb193
|
||||
content/113: cc31ae653c5642b223ec634888de29c6
|
||||
content/114: 1773624e9ac3d5132b505894ef51977e
|
||||
content/115: d62c9575cc66feec7589fba95c9f7aee
|
||||
content/116: 8df5939abc771b5d24c115ef20d42d6f
|
||||
content/117: ecd571818ddf3d31b08b80a25958a662
|
||||
content/118: 7dcdf2fbf3fce3f94987046506e12a9b
|
||||
004fe5dc5ca33719cb175f3619fe5208:
|
||||
meta/title: be754b00d8a2c13c561e314f6f526515
|
||||
content/0: 7e581dbf3e581d503ac94f7fb7938b1f
|
||||
@@ -3970,7 +4086,25 @@ checksums:
|
||||
content/7: e73f4b831f5b77c71d7d86c83abcbf11
|
||||
content/8: 07e064793f3e0bbcb02c4dc6083b6daa
|
||||
content/9: a702b191c3f94458bee880d33853e0cb
|
||||
content/10: ce110ab5da3ff96f8cbf96ce3376fc51
|
||||
content/11: 83f9b3ab46b0501c8eb3989bec3f4f1b
|
||||
content/12: e00be80effb71b0acb014f9aa53dfbe1
|
||||
content/13: 847a381137856ded9faa5994fbc489fb
|
||||
content/10: c497057cbb9dd53599071f8550f327cd
|
||||
content/11: cc6e48f85d5c6bfc05f846341f2d5cc9
|
||||
content/12: 8a80a6a97da9bf375fac565f1caabb49
|
||||
content/13: 098cc8e062187eb877fe5e172a4aa467
|
||||
content/14: e452a7cb33d7cf2f7cf1804703edaa20
|
||||
content/15: 466cfd61b1d0fcd8fc93d867dfd0f3e3
|
||||
content/16: 377572316021236994f444e88949ef34
|
||||
content/17: 54852933b2cbe3deb3b1c3059dba6a15
|
||||
content/18: 9e66b045763abe053a3ba8d2c23e9aa1
|
||||
content/19: d34f0950591e3beb085e99db64d07d2f
|
||||
content/20: 8677ef07618f7289b04fef3cce8bf745
|
||||
content/21: c0e6d2790e369569e7f272a5ec9ae21a
|
||||
content/22: 93643a0d9d9745f131e4eabf7ead2018
|
||||
content/23: 89c7da6d2e8fbc25e303a7381e147237
|
||||
content/24: a8ec63597dc3a3564bc5f0c3a6e5f42c
|
||||
content/25: 379618989b6cd427b319cfdab523297d
|
||||
content/26: bc4c2e699a7514771276e90e9aee53ba
|
||||
content/27: 38e14193b679ef774c3db93d399e700e
|
||||
content/28: ce110ab5da3ff96f8cbf96ce3376fc51
|
||||
content/29: 83f9b3ab46b0501c8eb3989bec3f4f1b
|
||||
content/30: e00be80effb71b0acb014f9aa53dfbe1
|
||||
content/31: 847a381137856ded9faa5994fbc489fb
|
||||
|
||||
@@ -9,7 +9,7 @@ export function cn(...inputs: ClassValue[]) {
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the full URL for an asset stored in Vercel Blob or local fallback
|
||||
* Get the full URL for an asset stored in Vercel Blob
|
||||
* - If CDN is configured (NEXT_PUBLIC_BLOB_BASE_URL), uses CDN URL
|
||||
* - Otherwise falls back to local static assets served from root path
|
||||
*/
|
||||
@@ -20,12 +20,3 @@ export function getAssetUrl(filename: string) {
|
||||
}
|
||||
return `/${filename}`
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the full URL for a video asset stored in Vercel Blob or local fallback
|
||||
* - If CDN is configured (NEXT_PUBLIC_BLOB_BASE_URL), uses CDN URL
|
||||
* - Otherwise falls back to local static assets served from root path
|
||||
*/
|
||||
export function getVideoUrl(filename: string) {
|
||||
return getAssetUrl(filename)
|
||||
}
|
||||
|
||||
@@ -2,7 +2,6 @@
|
||||
|
||||
import { useEffect, useState } from 'react'
|
||||
import Image from 'next/image'
|
||||
import { getAssetUrl } from '@/lib/utils'
|
||||
import { inter } from '@/app/fonts/inter'
|
||||
|
||||
interface Testimonial {
|
||||
@@ -14,7 +13,6 @@ interface Testimonial {
|
||||
profileImage: string
|
||||
}
|
||||
|
||||
// Import all testimonials
|
||||
const allTestimonials: Testimonial[] = [
|
||||
{
|
||||
text: "🚨 BREAKING: This startup just dropped the fastest way to build AI agents.\n\nThis Figma-like canvas to build agents will blow your mind.\n\nHere's why this is the best tool for building AI agents:",
|
||||
@@ -22,7 +20,7 @@ const allTestimonials: Testimonial[] = [
|
||||
username: '@hasantoxr',
|
||||
viewCount: '515k',
|
||||
tweetUrl: 'https://x.com/hasantoxr/status/1912909502036525271',
|
||||
profileImage: getAssetUrl('twitter/hasan.jpg'),
|
||||
profileImage: '/twitter/hasan.jpg',
|
||||
},
|
||||
{
|
||||
text: "Drag-and-drop AI workflows for devs who'd rather build agents than babysit them.",
|
||||
@@ -30,7 +28,7 @@ const allTestimonials: Testimonial[] = [
|
||||
username: '@GithubProjects',
|
||||
viewCount: '90.4k',
|
||||
tweetUrl: 'https://x.com/GithubProjects/status/1906383555707490499',
|
||||
profileImage: getAssetUrl('twitter/github-projects.jpg'),
|
||||
profileImage: '/twitter/github-projects.jpg',
|
||||
},
|
||||
{
|
||||
text: "🚨 BREAKING: This startup just dropped the fastest way to build AI agents.\n\nThis Figma-like canvas to build agents will blow your mind.\n\nHere's why this is the best tool for building AI agents:",
|
||||
@@ -38,7 +36,7 @@ const allTestimonials: Testimonial[] = [
|
||||
username: '@lazukars',
|
||||
viewCount: '47.4k',
|
||||
tweetUrl: 'https://x.com/lazukars/status/1913136390503600575',
|
||||
profileImage: getAssetUrl('twitter/lazukars.png'),
|
||||
profileImage: '/twitter/lazukars.png',
|
||||
},
|
||||
{
|
||||
text: 'omfggggg this is the zapier of agent building\n\ni always believed that building agents and using ai should not be limited to technical people. i think this solves just that\n\nthe fact that this is also open source makes me so optimistic about the future of building with ai :)))\n\ncongrats @karabegemir & @typingwala !!!',
|
||||
@@ -46,7 +44,7 @@ const allTestimonials: Testimonial[] = [
|
||||
username: '@nizzyabi',
|
||||
viewCount: '6,269',
|
||||
tweetUrl: 'https://x.com/nizzyabi/status/1907864421227180368',
|
||||
profileImage: getAssetUrl('twitter/nizzy.jpg'),
|
||||
profileImage: '/twitter/nizzy.jpg',
|
||||
},
|
||||
{
|
||||
text: 'A very good looking agent workflow builder 🔥 and open source!',
|
||||
@@ -54,7 +52,7 @@ const allTestimonials: Testimonial[] = [
|
||||
username: '@xyflowdev',
|
||||
viewCount: '3,246',
|
||||
tweetUrl: 'https://x.com/xyflowdev/status/1909501499719438670',
|
||||
profileImage: getAssetUrl('twitter/xyflow.jpg'),
|
||||
profileImage: '/twitter/xyflow.jpg',
|
||||
},
|
||||
{
|
||||
text: "One of the best products I've seen in the space, and the hustle and grind I've seen from @karabegemir and @typingwala is insane. Sim is positioned to build something game-changing, and there's no better team for the job.\n\nCongrats on the launch 🚀 🎊 great things ahead!",
|
||||
@@ -62,7 +60,7 @@ const allTestimonials: Testimonial[] = [
|
||||
username: '@firestorm776',
|
||||
viewCount: '1,256',
|
||||
tweetUrl: 'https://x.com/firestorm776/status/1907896097735061598',
|
||||
profileImage: getAssetUrl('twitter/samarth.jpg'),
|
||||
profileImage: '/twitter/samarth.jpg',
|
||||
},
|
||||
{
|
||||
text: 'lfgg got access to @simstudioai via @zerodotemail 😎',
|
||||
@@ -70,7 +68,7 @@ const allTestimonials: Testimonial[] = [
|
||||
username: '@nizzyabi',
|
||||
viewCount: '1,762',
|
||||
tweetUrl: 'https://x.com/nizzyabi/status/1910482357821595944',
|
||||
profileImage: getAssetUrl('twitter/nizzy.jpg'),
|
||||
profileImage: '/twitter/nizzy.jpg',
|
||||
},
|
||||
{
|
||||
text: 'Feels like we\'re finally getting a "Photoshop moment" for AI devs—visual, intuitive, and fast enough to keep up with ideas mid-flow.',
|
||||
@@ -78,7 +76,7 @@ const allTestimonials: Testimonial[] = [
|
||||
username: '@syamrajk',
|
||||
viewCount: '2,784',
|
||||
tweetUrl: 'https://x.com/syamrajk/status/1912911980110946491',
|
||||
profileImage: getAssetUrl('twitter/syamrajk.jpg'),
|
||||
profileImage: '/twitter/syamrajk.jpg',
|
||||
},
|
||||
{
|
||||
text: 'The use cases are endless. Great work @simstudioai',
|
||||
@@ -86,7 +84,7 @@ const allTestimonials: Testimonial[] = [
|
||||
username: '@daniel_zkim',
|
||||
viewCount: '103',
|
||||
tweetUrl: 'https://x.com/daniel_zkim/status/1907891273664782708',
|
||||
profileImage: getAssetUrl('twitter/daniel.jpg'),
|
||||
profileImage: '/twitter/daniel.jpg',
|
||||
},
|
||||
]
|
||||
|
||||
@@ -95,11 +93,9 @@ export default function Testimonials() {
|
||||
const [isTransitioning, setIsTransitioning] = useState(false)
|
||||
const [isPaused, setIsPaused] = useState(false)
|
||||
|
||||
// Create an extended array for smooth infinite scrolling
|
||||
const extendedTestimonials = [...allTestimonials, ...allTestimonials]
|
||||
|
||||
useEffect(() => {
|
||||
// Set up automatic sliding every 3 seconds
|
||||
const interval = setInterval(() => {
|
||||
if (!isPaused) {
|
||||
setIsTransitioning(true)
|
||||
@@ -110,17 +106,15 @@ export default function Testimonials() {
|
||||
return () => clearInterval(interval)
|
||||
}, [isPaused])
|
||||
|
||||
// Reset position when reaching the end for infinite loop
|
||||
useEffect(() => {
|
||||
if (currentIndex >= allTestimonials.length) {
|
||||
setTimeout(() => {
|
||||
setIsTransitioning(false)
|
||||
setCurrentIndex(0)
|
||||
}, 500) // Match transition duration
|
||||
}, 500)
|
||||
}
|
||||
}, [currentIndex])
|
||||
|
||||
// Calculate the transform value
|
||||
const getTransformValue = () => {
|
||||
// Each card unit (card + separator) takes exactly 25% width
|
||||
return `translateX(-${currentIndex * 25}%)`
|
||||
|
||||
@@ -403,7 +403,10 @@ export function mockExecutionDependencies() {
|
||||
provider: 'provider',
|
||||
providerConfig: 'providerConfig',
|
||||
},
|
||||
workflow: { id: 'id', userId: 'userId' },
|
||||
workflow: {
|
||||
id: 'id',
|
||||
userId: 'userId',
|
||||
},
|
||||
workflowSchedule: {
|
||||
id: 'id',
|
||||
workflowId: 'workflowId',
|
||||
@@ -1113,12 +1116,20 @@ export function createMockDatabase(options: MockDatabaseOptions = {}) {
|
||||
|
||||
const createUpdateChain = () => ({
|
||||
set: vi.fn().mockImplementation(() => ({
|
||||
where: vi.fn().mockImplementation(() => {
|
||||
if (updateOptions.throwError) {
|
||||
return Promise.reject(createDbError('update', updateOptions.errorMessage))
|
||||
}
|
||||
return Promise.resolve(updateOptions.results)
|
||||
}),
|
||||
where: vi.fn().mockImplementation(() => ({
|
||||
returning: vi.fn().mockImplementation(() => {
|
||||
if (updateOptions.throwError) {
|
||||
return Promise.reject(createDbError('update', updateOptions.errorMessage))
|
||||
}
|
||||
return Promise.resolve(updateOptions.results)
|
||||
}),
|
||||
then: vi.fn().mockImplementation((resolve) => {
|
||||
if (updateOptions.throwError) {
|
||||
return Promise.reject(createDbError('update', updateOptions.errorMessage))
|
||||
}
|
||||
return Promise.resolve(updateOptions.results).then(resolve)
|
||||
}),
|
||||
})),
|
||||
})),
|
||||
})
|
||||
|
||||
|
||||
@@ -3,6 +3,7 @@ import { userStats } from '@sim/db/schema'
|
||||
import { eq, sql } from 'drizzle-orm'
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { z } from 'zod'
|
||||
import { checkAndBillOverageThreshold } from '@/lib/billing/threshold-billing'
|
||||
import { checkInternalApiKey } from '@/lib/copilot/utils'
|
||||
import { isBillingEnabled } from '@/lib/environment'
|
||||
import { createLogger } from '@/lib/logs/console/logger'
|
||||
@@ -148,6 +149,9 @@ export async function POST(req: NextRequest) {
|
||||
addedTokens: totalTokens,
|
||||
})
|
||||
|
||||
// Check if user has hit overage threshold and bill incrementally
|
||||
await checkAndBillOverageThreshold(userId)
|
||||
|
||||
const duration = Date.now() - startTime
|
||||
|
||||
logger.info(`[${requestId}] Cost update completed successfully`, {
|
||||
|
||||
@@ -6,7 +6,7 @@ import { z } from 'zod'
|
||||
import { renderOTPEmail } from '@/components/emails/render-email'
|
||||
import { sendEmail } from '@/lib/email/mailer'
|
||||
import { createLogger } from '@/lib/logs/console/logger'
|
||||
import { getRedisClient, markMessageAsProcessed, releaseLock } from '@/lib/redis'
|
||||
import { getRedisClient } from '@/lib/redis'
|
||||
import { generateRequestId } from '@/lib/utils'
|
||||
import { addCorsHeaders, setChatAuthCookie } from '@/app/api/chat/utils'
|
||||
import { createErrorResponse, createSuccessResponse } from '@/app/api/workflows/utils'
|
||||
@@ -21,83 +21,52 @@ function generateOTP() {
|
||||
// We use 15 minutes (900 seconds) expiry for OTPs
|
||||
const OTP_EXPIRY = 15 * 60
|
||||
|
||||
// Store OTP in Redis
|
||||
async function storeOTP(email: string, chatId: string, otp: string): Promise<void> {
|
||||
async function storeOTP(email: string, chatId: string, otp: string): Promise<boolean> {
|
||||
const key = `otp:${email}:${chatId}`
|
||||
const redis = getRedisClient()
|
||||
|
||||
if (redis) {
|
||||
// Use Redis if available
|
||||
await redis.set(key, otp, 'EX', OTP_EXPIRY)
|
||||
} else {
|
||||
// Use the existing function as fallback to mark that an OTP exists
|
||||
await markMessageAsProcessed(key, OTP_EXPIRY)
|
||||
if (!redis) {
|
||||
logger.warn('Redis not available, OTP functionality requires Redis')
|
||||
return false
|
||||
}
|
||||
|
||||
// For the fallback case, we need to handle storing the OTP value separately
|
||||
// since markMessageAsProcessed only stores "1"
|
||||
const valueKey = `${key}:value`
|
||||
try {
|
||||
// Access the in-memory cache directly - hacky but works for fallback
|
||||
const inMemoryCache = (global as any).inMemoryCache
|
||||
if (inMemoryCache) {
|
||||
const fullKey = `processed:${valueKey}`
|
||||
const expiry = OTP_EXPIRY ? Date.now() + OTP_EXPIRY * 1000 : null
|
||||
inMemoryCache.set(fullKey, { value: otp, expiry })
|
||||
}
|
||||
} catch (error) {
|
||||
logger.error('Error storing OTP in fallback cache:', error)
|
||||
}
|
||||
try {
|
||||
await redis.set(key, otp, 'EX', OTP_EXPIRY)
|
||||
return true
|
||||
} catch (error) {
|
||||
logger.error('Error storing OTP in Redis:', error)
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Get OTP from Redis
|
||||
async function getOTP(email: string, chatId: string): Promise<string | null> {
|
||||
const key = `otp:${email}:${chatId}`
|
||||
const redis = getRedisClient()
|
||||
|
||||
if (redis) {
|
||||
// Use Redis if available
|
||||
return await redis.get(key)
|
||||
if (!redis) {
|
||||
return null
|
||||
}
|
||||
// Use the existing function as fallback - check if it exists
|
||||
const exists = await new Promise((resolve) => {
|
||||
try {
|
||||
// Check the in-memory cache directly - hacky but works for fallback
|
||||
const inMemoryCache = (global as any).inMemoryCache
|
||||
const fullKey = `processed:${key}`
|
||||
const cacheEntry = inMemoryCache?.get(fullKey)
|
||||
resolve(!!cacheEntry)
|
||||
} catch {
|
||||
resolve(false)
|
||||
}
|
||||
})
|
||||
|
||||
if (!exists) return null
|
||||
|
||||
// Try to get the value key
|
||||
const valueKey = `${key}:value`
|
||||
try {
|
||||
const inMemoryCache = (global as any).inMemoryCache
|
||||
const fullKey = `processed:${valueKey}`
|
||||
const cacheEntry = inMemoryCache?.get(fullKey)
|
||||
return cacheEntry?.value || null
|
||||
} catch {
|
||||
return await redis.get(key)
|
||||
} catch (error) {
|
||||
logger.error('Error getting OTP from Redis:', error)
|
||||
return null
|
||||
}
|
||||
}
|
||||
|
||||
// Delete OTP from Redis
|
||||
async function deleteOTP(email: string, chatId: string): Promise<void> {
|
||||
const key = `otp:${email}:${chatId}`
|
||||
const redis = getRedisClient()
|
||||
|
||||
if (redis) {
|
||||
// Use Redis if available
|
||||
if (!redis) {
|
||||
return
|
||||
}
|
||||
|
||||
try {
|
||||
await redis.del(key)
|
||||
} else {
|
||||
// Use the existing function as fallback
|
||||
await releaseLock(`processed:${key}`)
|
||||
await releaseLock(`processed:${key}:value`)
|
||||
} catch (error) {
|
||||
logger.error('Error deleting OTP from Redis:', error)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -177,7 +146,17 @@ export async function POST(
|
||||
|
||||
const otp = generateOTP()
|
||||
|
||||
await storeOTP(email, deployment.id, otp)
|
||||
const stored = await storeOTP(email, deployment.id, otp)
|
||||
if (!stored) {
|
||||
logger.error(`[${requestId}] Failed to store OTP - Redis unavailable`)
|
||||
return addCorsHeaders(
|
||||
createErrorResponse(
|
||||
'Email verification temporarily unavailable, please try again later',
|
||||
503
|
||||
),
|
||||
request
|
||||
)
|
||||
}
|
||||
|
||||
const emailHtml = await renderOTPEmail(
|
||||
otp,
|
||||
|
||||
@@ -27,7 +27,7 @@ describe('Chat Identifier API Route', () => {
|
||||
const mockAddCorsHeaders = vi.fn().mockImplementation((response) => response)
|
||||
const mockValidateChatAuth = vi.fn().mockResolvedValue({ authorized: true })
|
||||
const mockSetChatAuthCookie = vi.fn()
|
||||
const mockExecuteWorkflowForChat = vi.fn().mockResolvedValue(createMockStream())
|
||||
const mockCreateStreamingResponse = vi.fn().mockResolvedValue(createMockStream())
|
||||
|
||||
const mockChatResult = [
|
||||
{
|
||||
@@ -72,7 +72,16 @@ describe('Chat Identifier API Route', () => {
|
||||
validateChatAuth: mockValidateChatAuth,
|
||||
setChatAuthCookie: mockSetChatAuthCookie,
|
||||
validateAuthToken: vi.fn().mockReturnValue(true),
|
||||
executeWorkflowForChat: mockExecuteWorkflowForChat,
|
||||
}))
|
||||
|
||||
vi.doMock('@/lib/workflows/streaming', () => ({
|
||||
createStreamingResponse: mockCreateStreamingResponse,
|
||||
SSE_HEADERS: {
|
||||
'Content-Type': 'text/event-stream',
|
||||
'Cache-Control': 'no-cache',
|
||||
Connection: 'keep-alive',
|
||||
'X-Accel-Buffering': 'no',
|
||||
},
|
||||
}))
|
||||
|
||||
vi.doMock('@/lib/logs/console/logger', () => ({
|
||||
@@ -369,8 +378,23 @@ describe('Chat Identifier API Route', () => {
|
||||
expect(response.headers.get('Cache-Control')).toBe('no-cache')
|
||||
expect(response.headers.get('Connection')).toBe('keep-alive')
|
||||
|
||||
// Verify executeWorkflowForChat was called with correct parameters
|
||||
expect(mockExecuteWorkflowForChat).toHaveBeenCalledWith('chat-id', 'Hello world', 'conv-123')
|
||||
// Verify createStreamingResponse was called with correct workflow info
|
||||
expect(mockCreateStreamingResponse).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
workflow: expect.objectContaining({
|
||||
id: 'workflow-id',
|
||||
userId: 'user-id',
|
||||
}),
|
||||
input: expect.objectContaining({
|
||||
input: 'Hello world',
|
||||
conversationId: 'conv-123',
|
||||
}),
|
||||
streamConfig: expect.objectContaining({
|
||||
isSecureMode: true,
|
||||
workflowTriggerType: 'chat',
|
||||
}),
|
||||
})
|
||||
)
|
||||
})
|
||||
|
||||
it('should handle streaming response body correctly', async () => {
|
||||
@@ -399,8 +423,8 @@ describe('Chat Identifier API Route', () => {
|
||||
})
|
||||
|
||||
it('should handle workflow execution errors gracefully', async () => {
|
||||
const originalExecuteWorkflow = mockExecuteWorkflowForChat.getMockImplementation()
|
||||
mockExecuteWorkflowForChat.mockImplementationOnce(async () => {
|
||||
const originalStreamingResponse = mockCreateStreamingResponse.getMockImplementation()
|
||||
mockCreateStreamingResponse.mockImplementationOnce(async () => {
|
||||
throw new Error('Execution failed')
|
||||
})
|
||||
|
||||
@@ -417,8 +441,8 @@ describe('Chat Identifier API Route', () => {
|
||||
expect(data).toHaveProperty('error')
|
||||
expect(data).toHaveProperty('message', 'Execution failed')
|
||||
|
||||
if (originalExecuteWorkflow) {
|
||||
mockExecuteWorkflowForChat.mockImplementation(originalExecuteWorkflow)
|
||||
if (originalStreamingResponse) {
|
||||
mockCreateStreamingResponse.mockImplementation(originalStreamingResponse)
|
||||
}
|
||||
})
|
||||
|
||||
@@ -443,7 +467,7 @@ describe('Chat Identifier API Route', () => {
|
||||
expect(data).toHaveProperty('message', 'Invalid request body')
|
||||
})
|
||||
|
||||
it('should pass conversationId to executeWorkflowForChat when provided', async () => {
|
||||
it('should pass conversationId to streaming execution when provided', async () => {
|
||||
const req = createMockRequest('POST', {
|
||||
input: 'Hello world',
|
||||
conversationId: 'test-conversation-123',
|
||||
@@ -454,10 +478,13 @@ describe('Chat Identifier API Route', () => {
|
||||
|
||||
await POST(req, { params })
|
||||
|
||||
expect(mockExecuteWorkflowForChat).toHaveBeenCalledWith(
|
||||
'chat-id',
|
||||
'Hello world',
|
||||
'test-conversation-123'
|
||||
expect(mockCreateStreamingResponse).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
input: expect.objectContaining({
|
||||
input: 'Hello world',
|
||||
conversationId: 'test-conversation-123',
|
||||
}),
|
||||
})
|
||||
)
|
||||
})
|
||||
|
||||
@@ -469,7 +496,13 @@ describe('Chat Identifier API Route', () => {
|
||||
|
||||
await POST(req, { params })
|
||||
|
||||
expect(mockExecuteWorkflowForChat).toHaveBeenCalledWith('chat-id', 'Hello world', undefined)
|
||||
expect(mockCreateStreamingResponse).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
input: expect.objectContaining({
|
||||
input: 'Hello world',
|
||||
}),
|
||||
})
|
||||
)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
@@ -6,7 +6,7 @@ import { createLogger } from '@/lib/logs/console/logger'
|
||||
import { generateRequestId } from '@/lib/utils'
|
||||
import {
|
||||
addCorsHeaders,
|
||||
executeWorkflowForChat,
|
||||
processChatFiles,
|
||||
setChatAuthCookie,
|
||||
validateAuthToken,
|
||||
validateChatAuth,
|
||||
@@ -15,6 +15,9 @@ import { createErrorResponse, createSuccessResponse } from '@/app/api/workflows/
|
||||
|
||||
const logger = createLogger('ChatIdentifierAPI')
|
||||
|
||||
export const dynamic = 'force-dynamic'
|
||||
export const runtime = 'nodejs'
|
||||
|
||||
// This endpoint handles chat interactions via the identifier
|
||||
export async function POST(
|
||||
request: NextRequest,
|
||||
@@ -73,7 +76,7 @@ export async function POST(
|
||||
}
|
||||
|
||||
// Use the already parsed body
|
||||
const { input, password, email, conversationId } = parsedBody
|
||||
const { input, password, email, conversationId, files } = parsedBody
|
||||
|
||||
// If this is an authentication request (has password or email but no input),
|
||||
// set auth cookie and return success
|
||||
@@ -86,8 +89,8 @@ export async function POST(
|
||||
return response
|
||||
}
|
||||
|
||||
// For chat messages, create regular response
|
||||
if (!input) {
|
||||
// For chat messages, create regular response (allow empty input if files are present)
|
||||
if (!input && (!files || files.length === 0)) {
|
||||
return addCorsHeaders(createErrorResponse('No input provided', 400), request)
|
||||
}
|
||||
|
||||
@@ -106,18 +109,55 @@ export async function POST(
|
||||
}
|
||||
|
||||
try {
|
||||
// Execute workflow with structured input (input + conversationId for context)
|
||||
const result = await executeWorkflowForChat(deployment.id, input, conversationId)
|
||||
const selectedOutputs: string[] = []
|
||||
if (deployment.outputConfigs && Array.isArray(deployment.outputConfigs)) {
|
||||
for (const config of deployment.outputConfigs) {
|
||||
const outputId = config.path
|
||||
? `${config.blockId}_${config.path}`
|
||||
: `${config.blockId}_content`
|
||||
selectedOutputs.push(outputId)
|
||||
}
|
||||
}
|
||||
|
||||
// The result is always a ReadableStream that we can pipe to the client
|
||||
const streamResponse = new NextResponse(result, {
|
||||
status: 200,
|
||||
headers: {
|
||||
'Content-Type': 'text/event-stream',
|
||||
'Cache-Control': 'no-cache',
|
||||
Connection: 'keep-alive',
|
||||
'X-Accel-Buffering': 'no',
|
||||
const { createStreamingResponse } = await import('@/lib/workflows/streaming')
|
||||
const { SSE_HEADERS } = await import('@/lib/utils')
|
||||
const { createFilteredResult } = await import('@/app/api/workflows/[id]/execute/route')
|
||||
|
||||
const workflowInput: any = { input, conversationId }
|
||||
if (files && Array.isArray(files) && files.length > 0) {
|
||||
logger.debug(`[${requestId}] Processing ${files.length} attached files`)
|
||||
|
||||
const executionId = crypto.randomUUID()
|
||||
const executionContext = {
|
||||
workspaceId: deployment.userId,
|
||||
workflowId: deployment.workflowId,
|
||||
executionId,
|
||||
}
|
||||
|
||||
const uploadedFiles = await processChatFiles(files, executionContext, requestId)
|
||||
|
||||
if (uploadedFiles.length > 0) {
|
||||
workflowInput.files = uploadedFiles
|
||||
logger.info(`[${requestId}] Successfully processed ${uploadedFiles.length} files`)
|
||||
}
|
||||
}
|
||||
|
||||
const stream = await createStreamingResponse({
|
||||
requestId,
|
||||
workflow: { id: deployment.workflowId, userId: deployment.userId, isDeployed: true },
|
||||
input: workflowInput,
|
||||
executingUserId: deployment.userId,
|
||||
streamConfig: {
|
||||
selectedOutputs,
|
||||
isSecureMode: true,
|
||||
workflowTriggerType: 'chat',
|
||||
},
|
||||
createFilteredResult,
|
||||
})
|
||||
|
||||
const streamResponse = new NextResponse(stream, {
|
||||
status: 200,
|
||||
headers: SSE_HEADERS,
|
||||
})
|
||||
return addCorsHeaders(streamResponse, request)
|
||||
} catch (error: any) {
|
||||
|
||||
@@ -416,7 +416,7 @@ describe('Chat API Utils', () => {
|
||||
execution: executionResult,
|
||||
}
|
||||
|
||||
// Simulate the type extraction logic from executeWorkflowForChat
|
||||
// Test that streaming execution wraps the result correctly
|
||||
const extractedFromStreaming =
|
||||
streamingResult && typeof streamingResult === 'object' && 'execution' in streamingResult
|
||||
? streamingResult.execution
|
||||
|
||||
@@ -1,29 +1,13 @@
|
||||
import { db } from '@sim/db'
|
||||
import { chat, userStats, workflow } from '@sim/db/schema'
|
||||
import { eq, sql } from 'drizzle-orm'
|
||||
import { chat, workflow } from '@sim/db/schema'
|
||||
import { eq } from 'drizzle-orm'
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { v4 as uuidv4 } from 'uuid'
|
||||
import { checkServerSideUsageLimits } from '@/lib/billing'
|
||||
import { isDev } from '@/lib/environment'
|
||||
import { getPersonalAndWorkspaceEnv } from '@/lib/environment/utils'
|
||||
import { createLogger } from '@/lib/logs/console/logger'
|
||||
import { LoggingSession } from '@/lib/logs/execution/logging-session'
|
||||
import { buildTraceSpans } from '@/lib/logs/execution/trace-spans/trace-spans'
|
||||
import { hasAdminPermission } from '@/lib/permissions/utils'
|
||||
import { processStreamingBlockLogs } from '@/lib/tokenization'
|
||||
import { decryptSecret, generateRequestId } from '@/lib/utils'
|
||||
import { TriggerUtils } from '@/lib/workflows/triggers'
|
||||
import { CHAT_ERROR_MESSAGES } from '@/app/chat/constants'
|
||||
import { getBlock } from '@/blocks'
|
||||
import { Executor } from '@/executor'
|
||||
import type { BlockLog, ExecutionResult } from '@/executor/types'
|
||||
import { Serializer } from '@/serializer'
|
||||
import { mergeSubblockState } from '@/stores/workflows/server-utils'
|
||||
import type { WorkflowState } from '@/stores/workflows/workflow/types'
|
||||
|
||||
declare global {
|
||||
var __chatStreamProcessingTasks: Promise<{ success: boolean; error?: any }>[] | undefined
|
||||
}
|
||||
import { decryptSecret } from '@/lib/utils'
|
||||
import { uploadExecutionFile } from '@/lib/workflows/execution-file-storage'
|
||||
import type { UserFile } from '@/executor/types'
|
||||
|
||||
const logger = createLogger('ChatAuthUtils')
|
||||
|
||||
@@ -283,584 +267,59 @@ export async function validateChatAuth(
|
||||
}
|
||||
|
||||
/**
|
||||
* Executes a workflow for a chat request and returns the formatted output.
|
||||
*
|
||||
* When workflows reference <start.input>, they receive the input directly.
|
||||
* The conversationId is available at <start.conversationId> for maintaining chat context.
|
||||
*
|
||||
* @param chatId - Chat deployment identifier
|
||||
* @param input - User's chat input
|
||||
* @param conversationId - Optional ID for maintaining conversation context
|
||||
* @returns Workflow execution result formatted for the chat interface
|
||||
* Process and upload chat files to execution storage
|
||||
* Handles both base64 dataUrl format and direct URL pass-through
|
||||
*/
|
||||
export async function executeWorkflowForChat(
|
||||
chatId: string,
|
||||
input: string,
|
||||
conversationId?: string
|
||||
): Promise<any> {
|
||||
const requestId = generateRequestId()
|
||||
export async function processChatFiles(
|
||||
files: Array<{ dataUrl?: string; url?: string; name: string; type: string }>,
|
||||
executionContext: { workspaceId: string; workflowId: string; executionId: string },
|
||||
requestId: string
|
||||
): Promise<UserFile[]> {
|
||||
const uploadedFiles: UserFile[] = []
|
||||
|
||||
logger.debug(
|
||||
`[${requestId}] Executing workflow for chat: ${chatId}${
|
||||
conversationId ? `, conversationId: ${conversationId}` : ''
|
||||
}`
|
||||
)
|
||||
|
||||
// Find the chat deployment
|
||||
const deploymentResult = await db
|
||||
.select({
|
||||
id: chat.id,
|
||||
workflowId: chat.workflowId,
|
||||
userId: chat.userId,
|
||||
outputConfigs: chat.outputConfigs,
|
||||
customizations: chat.customizations,
|
||||
})
|
||||
.from(chat)
|
||||
.where(eq(chat.id, chatId))
|
||||
.limit(1)
|
||||
|
||||
if (deploymentResult.length === 0) {
|
||||
logger.warn(`[${requestId}] Chat not found: ${chatId}`)
|
||||
throw new Error('Chat not found')
|
||||
}
|
||||
|
||||
const deployment = deploymentResult[0]
|
||||
const workflowId = deployment.workflowId
|
||||
const executionId = uuidv4()
|
||||
|
||||
const usageCheck = await checkServerSideUsageLimits(deployment.userId)
|
||||
if (usageCheck.isExceeded) {
|
||||
logger.warn(
|
||||
`[${requestId}] User ${deployment.userId} has exceeded usage limits. Skipping chat execution.`,
|
||||
{
|
||||
currentUsage: usageCheck.currentUsage,
|
||||
limit: usageCheck.limit,
|
||||
workflowId: deployment.workflowId,
|
||||
chatId,
|
||||
}
|
||||
)
|
||||
throw new Error(usageCheck.message || CHAT_ERROR_MESSAGES.USAGE_LIMIT_EXCEEDED)
|
||||
}
|
||||
|
||||
// Set up logging for chat execution
|
||||
const loggingSession = new LoggingSession(workflowId, executionId, 'chat', requestId)
|
||||
|
||||
// Check for multi-output configuration in customizations
|
||||
const customizations = (deployment.customizations || {}) as Record<string, any>
|
||||
let outputBlockIds: string[] = []
|
||||
|
||||
// Extract output configs from the new schema format
|
||||
let selectedOutputIds: string[] = []
|
||||
if (deployment.outputConfigs && Array.isArray(deployment.outputConfigs)) {
|
||||
// Extract output IDs in the format expected by the streaming processor
|
||||
logger.debug(
|
||||
`[${requestId}] Found ${deployment.outputConfigs.length} output configs in deployment`
|
||||
)
|
||||
|
||||
selectedOutputIds = deployment.outputConfigs.map((config) => {
|
||||
const outputId = config.path
|
||||
? `${config.blockId}_${config.path}`
|
||||
: `${config.blockId}.content`
|
||||
|
||||
logger.debug(
|
||||
`[${requestId}] Processing output config: blockId=${config.blockId}, path=${config.path || 'content'} -> outputId=${outputId}`
|
||||
)
|
||||
|
||||
return outputId
|
||||
})
|
||||
|
||||
// Also extract block IDs for legacy compatibility
|
||||
outputBlockIds = deployment.outputConfigs.map((config) => config.blockId)
|
||||
} else {
|
||||
// Use customizations as fallback
|
||||
outputBlockIds = Array.isArray(customizations.outputBlockIds)
|
||||
? customizations.outputBlockIds
|
||||
: []
|
||||
}
|
||||
|
||||
// Fall back to customizations if we still have no outputs
|
||||
if (
|
||||
outputBlockIds.length === 0 &&
|
||||
customizations.outputBlockIds &&
|
||||
customizations.outputBlockIds.length > 0
|
||||
) {
|
||||
outputBlockIds = customizations.outputBlockIds
|
||||
}
|
||||
|
||||
logger.debug(
|
||||
`[${requestId}] Using ${outputBlockIds.length} output blocks and ${selectedOutputIds.length} selected output IDs for extraction`
|
||||
)
|
||||
|
||||
// Find the workflow to check if it's deployed
|
||||
const workflowResult = await db
|
||||
.select({
|
||||
isDeployed: workflow.isDeployed,
|
||||
variables: workflow.variables,
|
||||
workspaceId: workflow.workspaceId,
|
||||
})
|
||||
.from(workflow)
|
||||
.where(eq(workflow.id, workflowId))
|
||||
.limit(1)
|
||||
|
||||
if (workflowResult.length === 0 || !workflowResult[0].isDeployed) {
|
||||
logger.warn(`[${requestId}] Workflow not found or not deployed: ${workflowId}`)
|
||||
throw new Error('Workflow not available')
|
||||
}
|
||||
|
||||
// Load the active deployed state from the deployment versions table
|
||||
const { loadDeployedWorkflowState } = await import('@/lib/workflows/db-helpers')
|
||||
|
||||
let deployedState: WorkflowState
|
||||
try {
|
||||
deployedState = await loadDeployedWorkflowState(workflowId)
|
||||
} catch (error) {
|
||||
logger.error(`[${requestId}] Failed to load deployed state for workflow ${workflowId}:`, error)
|
||||
throw new Error(`Workflow must be deployed to be available for chat`)
|
||||
}
|
||||
|
||||
const { blocks, edges, loops, parallels } = deployedState
|
||||
|
||||
// Prepare for execution, similar to use-workflow-execution.ts
|
||||
const mergedStates = mergeSubblockState(blocks)
|
||||
|
||||
const filteredStates = Object.entries(mergedStates).reduce(
|
||||
(acc, [id, block]) => {
|
||||
const blockConfig = getBlock(block.type)
|
||||
const isTriggerBlock = blockConfig?.category === 'triggers'
|
||||
const isChatTrigger = block.type === 'chat_trigger'
|
||||
|
||||
// Keep all non-trigger blocks and also keep the chat_trigger block
|
||||
if (!isTriggerBlock || isChatTrigger) {
|
||||
acc[id] = block
|
||||
}
|
||||
return acc
|
||||
},
|
||||
{} as typeof mergedStates
|
||||
)
|
||||
|
||||
const currentBlockStates = Object.entries(filteredStates).reduce(
|
||||
(acc, [id, block]) => {
|
||||
acc[id] = Object.entries(block.subBlocks).reduce(
|
||||
(subAcc, [key, subBlock]) => {
|
||||
subAcc[key] = subBlock.value
|
||||
return subAcc
|
||||
},
|
||||
{} as Record<string, any>
|
||||
)
|
||||
return acc
|
||||
},
|
||||
{} as Record<string, Record<string, any>>
|
||||
)
|
||||
|
||||
// Get user environment variables with workspace precedence
|
||||
let envVars: Record<string, string> = {}
|
||||
try {
|
||||
const workspaceId = workflowResult[0].workspaceId || undefined
|
||||
const { personalEncrypted, workspaceEncrypted } = await getPersonalAndWorkspaceEnv(
|
||||
deployment.userId,
|
||||
workspaceId
|
||||
)
|
||||
envVars = { ...personalEncrypted, ...workspaceEncrypted }
|
||||
} catch (error) {
|
||||
logger.warn(`[${requestId}] Could not fetch environment variables:`, error)
|
||||
}
|
||||
|
||||
let workflowVariables = {}
|
||||
try {
|
||||
if (workflowResult[0].variables) {
|
||||
workflowVariables =
|
||||
typeof workflowResult[0].variables === 'string'
|
||||
? JSON.parse(workflowResult[0].variables)
|
||||
: workflowResult[0].variables
|
||||
}
|
||||
} catch (error) {
|
||||
logger.warn(`[${requestId}] Could not parse workflow variables:`, error)
|
||||
}
|
||||
|
||||
// Filter edges to exclude connections to/from trigger blocks (same as manual execution)
|
||||
const triggerBlockIds = Object.keys(mergedStates).filter((id) => {
|
||||
const type = mergedStates[id].type
|
||||
const blockConfig = getBlock(type)
|
||||
// Exclude chat_trigger from the list so its edges are preserved
|
||||
return blockConfig?.category === 'triggers' && type !== 'chat_trigger'
|
||||
})
|
||||
|
||||
const filteredEdges = edges.filter(
|
||||
(edge) => !triggerBlockIds.includes(edge.source) && !triggerBlockIds.includes(edge.target)
|
||||
)
|
||||
|
||||
// Create serialized workflow with filtered blocks and edges
|
||||
const serializedWorkflow = new Serializer().serializeWorkflow(
|
||||
filteredStates,
|
||||
filteredEdges,
|
||||
loops,
|
||||
parallels,
|
||||
true // Enable validation during execution
|
||||
)
|
||||
|
||||
// Decrypt environment variables
|
||||
const decryptedEnvVars: Record<string, string> = {}
|
||||
for (const [key, encryptedValue] of Object.entries(envVars)) {
|
||||
for (const file of files) {
|
||||
try {
|
||||
const { decrypted } = await decryptSecret(encryptedValue)
|
||||
decryptedEnvVars[key] = decrypted
|
||||
} catch (error: any) {
|
||||
logger.error(`[${requestId}] Failed to decrypt environment variable "${key}"`, error)
|
||||
// Log but continue - we don't want to break execution if just one var fails
|
||||
}
|
||||
}
|
||||
if (file.dataUrl) {
|
||||
const dataUrlPrefix = 'data:'
|
||||
const base64Prefix = ';base64,'
|
||||
|
||||
// Process block states to ensure response formats are properly parsed
const processedBlockStates = Object.entries(currentBlockStates).reduce(
(acc, [blockId, blockState]) => {
// Check if this block has a responseFormat that needs to be parsed
if (blockState.responseFormat && typeof blockState.responseFormat === 'string') {
try {
logger.debug(`[${requestId}] Parsing responseFormat for block ${blockId}`)
// Attempt to parse the responseFormat if it's a string
const parsedResponseFormat = JSON.parse(blockState.responseFormat)

acc[blockId] = {
...blockState,
responseFormat: parsedResponseFormat,
}
} catch (error) {
logger.warn(`[${requestId}] Failed to parse responseFormat for block ${blockId}`, error)
acc[blockId] = blockState
}
} else {
acc[blockId] = blockState
}
return acc
},
{} as Record<string, Record<string, any>>
)

// Start logging session
await loggingSession.safeStart({
userId: deployment.userId,
workspaceId: workflowResult[0].workspaceId || '',
variables: workflowVariables,
})

let sessionCompleted = false

const stream = new ReadableStream({
async start(controller) {
const encoder = new TextEncoder()

try {
const streamedContent = new Map<string, string>()
const streamedBlocks = new Set<string>() // Track which blocks have started streaming

const onStream = async (streamingExecution: any): Promise<void> => {
if (!streamingExecution.stream) return

const blockId = streamingExecution.execution?.blockId
const reader = streamingExecution.stream.getReader()
if (blockId) {
streamedContent.set(blockId, '')

// Add separator if this is not the first block to stream
if (streamedBlocks.size > 0) {
// Send separator before the new block starts
controller.enqueue(
encoder.encode(`data: ${JSON.stringify({ blockId, chunk: '\n\n' })}\n\n`)
)
}
streamedBlocks.add(blockId)
}
try {
while (true) {
const { done, value } = await reader.read()
if (done) {
controller.enqueue(
encoder.encode(`data: ${JSON.stringify({ blockId, event: 'end' })}\n\n`)
)
break
}
const chunk = new TextDecoder().decode(value)
if (blockId) {
streamedContent.set(blockId, (streamedContent.get(blockId) || '') + chunk)
}
controller.enqueue(encoder.encode(`data: ${JSON.stringify({ blockId, chunk })}\n\n`))
}
} catch (error) {
logger.error('Error while reading from stream:', error)
controller.error(error)
}
if (!file.dataUrl.startsWith(dataUrlPrefix)) {
logger.warn(`[${requestId}] Invalid dataUrl format for file: ${file.name}`)
continue
}
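// --- Illustrative sketch (not part of the diff): one way a client could consume the
// `data: {...}\n\n` events emitted above. The endpoint URL is a placeholder.
async function readChatStream(url: string, onChunk: (blockId: string, chunk: string) => void) {
  const res = await fetch(url, { method: 'POST' })
  const reader = res.body!.getReader()
  const decoder = new TextDecoder()
  let buffered = ''
  while (true) {
    const { done, value } = await reader.read()
    if (done) break
    buffered += decoder.decode(value, { stream: true })
    // SSE frames are separated by a blank line; each frame here starts with "data: "
    const frames = buffered.split('\n\n')
    buffered = frames.pop() ?? ''
    for (const frame of frames) {
      if (!frame.startsWith('data: ')) continue
      const payload = JSON.parse(frame.slice(6))
      if (payload.chunk) onChunk(payload.blockId, payload.chunk)
    }
  }
}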

// Determine the start block for chat execution BEFORE creating executor
const startBlock = TriggerUtils.findStartBlock(mergedStates, 'chat')

if (!startBlock) {
const errorMessage = CHAT_ERROR_MESSAGES.NO_CHAT_TRIGGER
logger.error(`[${requestId}] ${errorMessage}`)

if (!sessionCompleted) {
await loggingSession.safeCompleteWithError({
endedAt: new Date().toISOString(),
totalDurationMs: 0,
error: { message: errorMessage },
})
sessionCompleted = true
}

// Send error event that the client expects
controller.enqueue(
encoder.encode(
`data: ${JSON.stringify({
event: 'error',
error: CHAT_ERROR_MESSAGES.GENERIC_ERROR,
})}\n\n`
)
)
const base64Index = file.dataUrl.indexOf(base64Prefix)
if (base64Index === -1) {
logger.warn(
`[${requestId}] Invalid dataUrl format (no base64 marker) for file: ${file.name}`
)
controller.close()
return
continue
}

const startBlockId = startBlock.blockId
const mimeType = file.dataUrl.substring(dataUrlPrefix.length, base64Index)
const base64Data = file.dataUrl.substring(base64Index + base64Prefix.length)
const buffer = Buffer.from(base64Data, 'base64')
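// --- Illustrative sketch (not part of the diff): the same data-URL split as above,
// isolated as a helper. Assumes input like "data:image/png;base64,iVBORw..."; the
// function name and shape are made up for illustration only.
function parseDataUrl(dataUrl: string): { mimeType: string; buffer: Buffer } | null {
  const dataPrefix = 'data:'
  const base64Marker = ';base64,'
  if (!dataUrl.startsWith(dataPrefix)) return null
  const markerIndex = dataUrl.indexOf(base64Marker)
  if (markerIndex === -1) return null
  const mimeType = dataUrl.substring(dataPrefix.length, markerIndex)
  const buffer = Buffer.from(dataUrl.substring(markerIndex + base64Marker.length), 'base64')
  return { mimeType, buffer }
}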

// Create executor AFTER confirming we have a chat trigger
const executor = new Executor({
workflow: serializedWorkflow,
currentBlockStates: processedBlockStates,
envVarValues: decryptedEnvVars,
workflowInput: { input: input, conversationId },
workflowVariables,
contextExtensions: {
stream: true,
selectedOutputIds: selectedOutputIds.length > 0 ? selectedOutputIds : outputBlockIds,
edges: filteredEdges.map((e: any) => ({
source: e.source,
target: e.target,
})),
onStream,
isDeployedContext: true,
},
})
logger.debug(`[${requestId}] Uploading file to S3: ${file.name} (${buffer.length} bytes)`)

// Set up logging on the executor
loggingSession.setupExecutor(executor)

let result
try {
result = await executor.execute(workflowId, startBlockId)
} catch (error: any) {
logger.error(`[${requestId}] Chat workflow execution failed:`, error)
if (!sessionCompleted) {
await loggingSession.safeCompleteWithError({
endedAt: new Date().toISOString(),
totalDurationMs: 0,
error: { message: error.message || 'Chat workflow execution failed' },
})
sessionCompleted = true
}

// Send error to stream before ending
controller.enqueue(
encoder.encode(
`data: ${JSON.stringify({
event: 'error',
error: error.message || 'Chat workflow execution failed',
})}\n\n`
)
)
controller.close()
return // Don't throw - just return to end the stream gracefully
}

// Handle both ExecutionResult and StreamingExecution types
|
||||
const executionResult =
|
||||
result && typeof result === 'object' && 'execution' in result
|
||||
? (result.execution as ExecutionResult)
|
||||
: (result as ExecutionResult)
|
||||
|
||||
if (executionResult?.logs) {
|
||||
// Update streamed content and apply tokenization - process regardless of overall success
|
||||
// This ensures partial successes (some agents succeed, some fail) still return results
|
||||
|
||||
// Add newlines between different agent outputs for better readability
|
||||
const processedOutputs = new Set<string>()
|
||||
executionResult.logs.forEach((log: BlockLog) => {
|
||||
if (streamedContent.has(log.blockId)) {
|
||||
const content = streamedContent.get(log.blockId)
|
||||
if (log.output && content) {
|
||||
// Add newline separation between different outputs (but not before the first one)
|
||||
const separator = processedOutputs.size > 0 ? '\n\n' : ''
|
||||
log.output.content = separator + content
|
||||
processedOutputs.add(log.blockId)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
// Also process non-streamed outputs from selected blocks (like function blocks)
|
||||
// This uses the same logic as the chat panel to ensure identical behavior
|
||||
const nonStreamingLogs = executionResult.logs.filter(
|
||||
(log: BlockLog) => !streamedContent.has(log.blockId)
|
||||
)
|
||||
|
||||
// Extract the exact same functions used by the chat panel
const extractBlockIdFromOutputId = (outputId: string): string => {
return outputId.includes('_') ? outputId.split('_')[0] : outputId.split('.')[0]
}

const extractPathFromOutputId = (outputId: string, blockId: string): string => {
return outputId.substring(blockId.length + 1)
}

const parseOutputContentSafely = (output: any): any => {
if (!output?.content) {
return output
}

if (typeof output.content === 'string') {
try {
return JSON.parse(output.content)
} catch (e) {
// Fallback to original structure if parsing fails
return output
}
}

return output
}
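// --- Illustrative sketch (not part of the diff): decomposing a selected output ID with
// the helpers above. The ID below is a made-up example.
const exampleOutputId = '1f2e3d4c_content.text'
const exampleBlockId = extractBlockIdFromOutputId(exampleOutputId) // '1f2e3d4c'
const examplePath = extractPathFromOutputId(exampleOutputId, exampleBlockId) // 'content.text'
// The dotted path is then walked one segment at a time over the safely parsed output,
// exactly as the pathParts loop further below does.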
|
||||
|
||||
// Filter outputs that have matching logs (exactly like chat panel)
|
||||
const outputsToRender = selectedOutputIds.filter((outputId) => {
|
||||
const blockIdForOutput = extractBlockIdFromOutputId(outputId)
|
||||
return nonStreamingLogs.some((log) => log.blockId === blockIdForOutput)
|
||||
})
|
||||
|
||||
// Process each selected output (exactly like chat panel)
|
||||
for (const outputId of outputsToRender) {
|
||||
const blockIdForOutput = extractBlockIdFromOutputId(outputId)
|
||||
const path = extractPathFromOutputId(outputId, blockIdForOutput)
|
||||
const log = nonStreamingLogs.find((l) => l.blockId === blockIdForOutput)
|
||||
|
||||
if (log) {
|
||||
let outputValue: any = log.output
|
||||
|
||||
if (path) {
|
||||
// Parse JSON content safely (exactly like chat panel)
|
||||
outputValue = parseOutputContentSafely(outputValue)
|
||||
|
||||
const pathParts = path.split('.')
|
||||
for (const part of pathParts) {
|
||||
if (outputValue && typeof outputValue === 'object' && part in outputValue) {
|
||||
outputValue = outputValue[part]
|
||||
} else {
|
||||
outputValue = undefined
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (outputValue !== undefined) {
|
||||
// Add newline separation between different outputs
|
||||
const separator = processedOutputs.size > 0 ? '\n\n' : ''
|
||||
|
||||
// Format the output exactly like the chat panel
|
||||
const formattedOutput =
|
||||
typeof outputValue === 'string'
|
||||
? outputValue
|
||||
: JSON.stringify(outputValue, null, 2)
|
||||
|
||||
// Update the log content
|
||||
if (!log.output.content) {
|
||||
log.output.content = separator + formattedOutput
|
||||
} else {
|
||||
log.output.content = separator + formattedOutput
|
||||
}
|
||||
processedOutputs.add(log.blockId)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Process all logs for streaming tokenization
|
||||
const processedCount = processStreamingBlockLogs(executionResult.logs, streamedContent)
|
||||
logger.info(`Processed ${processedCount} blocks for streaming tokenization`)
|
||||
|
||||
const { traceSpans, totalDuration } = buildTraceSpans(executionResult)
|
||||
const enrichedResult = { ...executionResult, traceSpans, totalDuration }
|
||||
if (conversationId) {
|
||||
if (!enrichedResult.metadata) {
|
||||
enrichedResult.metadata = {
|
||||
duration: totalDuration,
|
||||
startTime: new Date().toISOString(),
|
||||
}
|
||||
}
|
||||
;(enrichedResult.metadata as any).conversationId = conversationId
|
||||
}
|
||||
// Use the executionId created at the beginning of this function
|
||||
logger.debug(`Using execution ID for deployed chat: ${executionId}`)
|
||||
|
||||
if (executionResult.success) {
|
||||
try {
|
||||
await db
|
||||
.update(userStats)
|
||||
.set({
|
||||
totalChatExecutions: sql`total_chat_executions + 1`,
|
||||
lastActive: new Date(),
|
||||
})
|
||||
.where(eq(userStats.userId, deployment.userId))
|
||||
logger.debug(`Updated user stats for deployed chat: ${deployment.userId}`)
|
||||
} catch (error) {
|
||||
logger.error(`Failed to update user stats for deployed chat:`, error)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!(result && typeof result === 'object' && 'stream' in result)) {
|
||||
controller.enqueue(
|
||||
encoder.encode(`data: ${JSON.stringify({ event: 'final', data: result })}\n\n`)
|
||||
)
|
||||
}
|
||||
|
||||
if (!sessionCompleted) {
|
||||
const resultForTracing =
|
||||
executionResult || ({ success: true, output: {}, logs: [] } as ExecutionResult)
|
||||
const { traceSpans } = buildTraceSpans(resultForTracing)
|
||||
await loggingSession.safeComplete({
|
||||
endedAt: new Date().toISOString(),
|
||||
totalDurationMs: executionResult?.metadata?.duration || 0,
|
||||
finalOutput: executionResult?.output || {},
|
||||
traceSpans,
|
||||
})
|
||||
sessionCompleted = true
|
||||
}
|
||||
|
||||
controller.close()
|
||||
} catch (error: any) {
|
||||
// Handle any errors that occur in the stream
|
||||
logger.error(`[${requestId}] Stream error:`, error)
|
||||
|
||||
// Send error event to client
|
||||
const encoder = new TextEncoder()
|
||||
controller.enqueue(
|
||||
encoder.encode(
|
||||
`data: ${JSON.stringify({
|
||||
event: 'error',
|
||||
error: error.message || 'An unexpected error occurred',
|
||||
})}\n\n`
|
||||
)
|
||||
const userFile = await uploadExecutionFile(
|
||||
executionContext,
|
||||
buffer,
|
||||
file.name,
|
||||
mimeType || file.type
|
||||
)
|
||||
|
||||
// Try to complete the logging session with error if not already completed
|
||||
if (!sessionCompleted && loggingSession) {
|
||||
await loggingSession.safeCompleteWithError({
|
||||
endedAt: new Date().toISOString(),
|
||||
totalDurationMs: 0,
|
||||
error: { message: error.message || 'Stream processing error' },
|
||||
})
|
||||
sessionCompleted = true
|
||||
}
|
||||
|
||||
controller.close()
|
||||
uploadedFiles.push(userFile)
|
||||
logger.debug(`[${requestId}] Successfully uploaded ${file.name} with URL: ${userFile.url}`)
|
||||
} else if (file.url) {
|
||||
uploadedFiles.push(file as UserFile)
|
||||
logger.debug(`[${requestId}] Using existing URL for file: ${file.name}`)
|
||||
}
|
||||
},
|
||||
})
|
||||
} catch (error) {
|
||||
logger.error(`[${requestId}] Failed to process file ${file.name}:`, error)
|
||||
throw new Error(`Failed to upload file: ${file.name}`)
|
||||
}
|
||||
}
|
||||
|
||||
return stream
|
||||
return uploadedFiles
|
||||
}
|
||||
|
||||
@@ -1,10 +1,11 @@
|
||||
import { readFile } from 'fs/promises'
|
||||
import type { NextRequest, NextResponse } from 'next/server'
|
||||
import type { NextRequest } from 'next/server'
|
||||
import { NextResponse } from 'next/server'
|
||||
import { checkHybridAuth } from '@/lib/auth/hybrid'
|
||||
import { createLogger } from '@/lib/logs/console/logger'
|
||||
import { downloadFile, getStorageProvider, isUsingCloudStorage } from '@/lib/uploads'
|
||||
import { S3_KB_CONFIG } from '@/lib/uploads/setup'
|
||||
import '@/lib/uploads/setup.server'
|
||||
|
||||
import {
|
||||
createErrorResponse,
|
||||
createFileResponse,
|
||||
@@ -15,9 +16,6 @@ import {
|
||||
|
||||
const logger = createLogger('FilesServeAPI')
|
||||
|
||||
/**
|
||||
* Main API route handler for serving files
|
||||
*/
|
||||
export async function GET(
|
||||
request: NextRequest,
|
||||
{ params }: { params: Promise<{ path: string[] }> }
|
||||
@@ -31,27 +29,26 @@ export async function GET(
|
||||
|
||||
logger.info('File serve request:', { path })
|
||||
|
||||
// Join the path segments to get the filename or cloud key
|
||||
const fullPath = path.join('/')
|
||||
const authResult = await checkHybridAuth(request, { requireWorkflowId: false })
|
||||
|
||||
// Check if this is a cloud file (path starts with 's3/' or 'blob/')
|
||||
if (!authResult.success) {
|
||||
logger.warn('Unauthorized file access attempt', { path, error: authResult.error })
|
||||
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
|
||||
}
|
||||
|
||||
const userId = authResult.userId
|
||||
const fullPath = path.join('/')
|
||||
const isS3Path = path[0] === 's3'
|
||||
const isBlobPath = path[0] === 'blob'
|
||||
const isCloudPath = isS3Path || isBlobPath
|
||||
const cloudKey = isCloudPath ? path.slice(1).join('/') : fullPath
|
||||
|
||||
// Use cloud handler if in production, path explicitly specifies cloud storage, or we're using cloud storage
|
||||
if (isUsingCloudStorage() || isCloudPath) {
|
||||
// Extract the actual key (remove 's3/' or 'blob/' prefix if present)
|
||||
const cloudKey = isCloudPath ? path.slice(1).join('/') : fullPath
|
||||
|
||||
// Get bucket type from query parameter
|
||||
const bucketType = request.nextUrl.searchParams.get('bucket')
|
||||
|
||||
return await handleCloudProxy(cloudKey, bucketType)
|
||||
return await handleCloudProxy(cloudKey, bucketType, userId)
|
||||
}
|
||||
|
||||
// Use local handler for local files
|
||||
return await handleLocalFile(fullPath)
|
||||
return await handleLocalFile(fullPath, userId)
|
||||
} catch (error) {
|
||||
logger.error('Error serving file:', error)
|
||||
|
||||
@@ -63,10 +60,7 @@ export async function GET(
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle local file serving
|
||||
*/
|
||||
async function handleLocalFile(filename: string): Promise<NextResponse> {
|
||||
async function handleLocalFile(filename: string, userId?: string): Promise<NextResponse> {
|
||||
try {
|
||||
const filePath = findLocalFile(filename)
|
||||
|
||||
@@ -77,6 +71,8 @@ async function handleLocalFile(filename: string): Promise<NextResponse> {
|
||||
const fileBuffer = await readFile(filePath)
|
||||
const contentType = getContentType(filename)
|
||||
|
||||
logger.info('Local file served', { userId, filename, size: fileBuffer.length })
|
||||
|
||||
return createFileResponse({
|
||||
buffer: fileBuffer,
|
||||
contentType,
|
||||
@@ -112,12 +108,10 @@ async function downloadKBFile(cloudKey: string): Promise<Buffer> {
|
||||
throw new Error(`Unsupported storage provider for KB files: ${storageProvider}`)
|
||||
}
|
||||
|
||||
/**
|
||||
* Proxy cloud file through our server
|
||||
*/
|
||||
async function handleCloudProxy(
|
||||
cloudKey: string,
|
||||
bucketType?: string | null
|
||||
bucketType?: string | null,
|
||||
userId?: string
|
||||
): Promise<NextResponse> {
|
||||
try {
|
||||
// Check if this is a KB file (starts with 'kb/')
|
||||
@@ -156,6 +150,13 @@ async function handleCloudProxy(
|
||||
const originalFilename = cloudKey.split('/').pop() || 'download'
|
||||
const contentType = getContentType(originalFilename)
|
||||
|
||||
logger.info('Cloud file served', {
|
||||
userId,
|
||||
key: cloudKey,
|
||||
size: fileBuffer.length,
|
||||
bucket: bucketType || 'default',
|
||||
})
|
||||
|
||||
return createFileResponse({
|
||||
buffer: fileBuffer,
|
||||
contentType,
|
||||
|
||||
@@ -123,8 +123,7 @@ export async function POST(request: NextRequest) {
|
||||
}
|
||||
}
|
||||
|
||||
// Create the serve path
|
||||
const servePath = `/api/files/serve/${result.key}`
|
||||
const servePath = result.path
|
||||
|
||||
const uploadResult = {
|
||||
name: originalName,
|
||||
|
||||
@@ -307,6 +307,22 @@ function getSecureFileHeaders(filename: string, originalContentType: string) {
}
}

/**
* Encode filename for Content-Disposition header to support non-ASCII characters
* Uses RFC 5987 encoding for international characters
*/
function encodeFilenameForHeader(filename: string): string {
const hasNonAscii = /[^\x00-\x7F]/.test(filename)

if (!hasNonAscii) {
return `filename="${filename}"`
}

const encodedFilename = encodeURIComponent(filename)
const asciiSafe = filename.replace(/[^\x00-\x7F]/g, '_')
return `filename="${asciiSafe}"; filename*=UTF-8''${encodedFilename}`
}
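// --- Illustrative usage sketch (not part of the diff); values worked out by hand:
// encodeFilenameForHeader('report.pdf') returns
//   filename="report.pdf"
// encodeFilenameForHeader('résumé.pdf') returns
//   filename="r_sum_.pdf"; filename*=UTF-8''r%C3%A9sum%C3%A9.pdf
// Browsers that understand RFC 5987 use the filename* value; older clients fall back
// to the ASCII-safe filename.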
|
||||
|
||||
/**
|
||||
* Create a file response with appropriate security headers
|
||||
*/
|
||||
@@ -317,7 +333,7 @@ export function createFileResponse(file: FileResponse): NextResponse {
|
||||
status: 200,
|
||||
headers: {
|
||||
'Content-Type': contentType,
|
||||
'Content-Disposition': `${disposition}; filename="${file.filename}"`,
|
||||
'Content-Disposition': `${disposition}; ${encodeFilenameForHeader(file.filename)}`,
|
||||
'Cache-Control': 'public, max-age=31536000', // Cache for 1 year
|
||||
'X-Content-Type-Options': 'nosniff',
|
||||
'Content-Security-Policy': "default-src 'none'; style-src 'unsafe-inline'; sandbox;",
|
||||
|
||||
@@ -234,6 +234,7 @@ describe('Knowledge Base By ID API Route', () => {
|
||||
{
|
||||
name: validUpdateData.name,
|
||||
description: validUpdateData.description,
|
||||
workspaceId: undefined,
|
||||
chunkingConfig: undefined,
|
||||
},
|
||||
expect.any(String)
|
||||
|
||||
@@ -103,6 +103,7 @@ export async function PUT(req: NextRequest, { params }: { params: Promise<{ id:
|
||||
{
|
||||
name: validatedData.name,
|
||||
description: validatedData.description,
|
||||
workspaceId: validatedData.workspaceId,
|
||||
chunkingConfig: validatedData.chunkingConfig,
|
||||
},
|
||||
requestId
|
||||
|
||||
@@ -4,6 +4,7 @@ import { and, eq, lte, not, sql } from 'drizzle-orm'
|
||||
import { NextResponse } from 'next/server'
|
||||
import { v4 as uuidv4 } from 'uuid'
|
||||
import { z } from 'zod'
|
||||
import { getApiKeyOwnerUserId } from '@/lib/api-key/service'
|
||||
import { checkServerSideUsageLimits } from '@/lib/billing'
|
||||
import { getHighestPrioritySubscription } from '@/lib/billing/core/subscription'
|
||||
import { getPersonalAndWorkspaceEnv } from '@/lib/environment/utils'
|
||||
@@ -17,7 +18,7 @@ import {
|
||||
getSubBlockValue,
|
||||
} from '@/lib/schedules/utils'
|
||||
import { decryptSecret, generateRequestId } from '@/lib/utils'
|
||||
import { loadDeployedWorkflowState } from '@/lib/workflows/db-helpers'
|
||||
import { blockExistsInDeployment, loadDeployedWorkflowState } from '@/lib/workflows/db-helpers'
|
||||
import { updateWorkflowRunCounts } from '@/lib/workflows/utils'
|
||||
import { Executor } from '@/executor'
|
||||
import { Serializer } from '@/serializer'
|
||||
@@ -106,12 +107,22 @@ export async function GET() {
|
||||
continue
|
||||
}
|
||||
|
||||
const actorUserId = await getApiKeyOwnerUserId(workflowRecord.pinnedApiKeyId)
|
||||
|
||||
if (!actorUserId) {
|
||||
logger.warn(
|
||||
`[${requestId}] Skipping schedule ${schedule.id}: pinned API key required to attribute usage.`
|
||||
)
|
||||
runningExecutions.delete(schedule.workflowId)
|
||||
continue
|
||||
}
|
||||
|
||||
// Check rate limits for scheduled execution (checks both personal and org subscriptions)
|
||||
const userSubscription = await getHighestPrioritySubscription(workflowRecord.userId)
|
||||
const userSubscription = await getHighestPrioritySubscription(actorUserId)
|
||||
|
||||
const rateLimiter = new RateLimiter()
|
||||
const rateLimitCheck = await rateLimiter.checkRateLimitWithSubscription(
|
||||
workflowRecord.userId,
|
||||
actorUserId,
|
||||
userSubscription,
|
||||
'schedule',
|
||||
false // schedules are always sync
|
||||
@@ -149,7 +160,7 @@ export async function GET() {
|
||||
continue
|
||||
}
|
||||
|
||||
const usageCheck = await checkServerSideUsageLimits(workflowRecord.userId)
|
||||
const usageCheck = await checkServerSideUsageLimits(actorUserId)
|
||||
if (usageCheck.isExceeded) {
|
||||
logger.warn(
|
||||
`[${requestId}] User ${workflowRecord.userId} has exceeded usage limits. Skipping scheduled execution.`,
|
||||
@@ -159,26 +170,19 @@ export async function GET() {
|
||||
workflowId: schedule.workflowId,
|
||||
}
|
||||
)
|
||||
|
||||
// Error logging handled by logging session
|
||||
|
||||
const retryDelay = 24 * 60 * 60 * 1000 // 24 hour delay for exceeded limits
|
||||
const nextRetryAt = new Date(now.getTime() + retryDelay)
|
||||
|
||||
try {
|
||||
const deployedData = await loadDeployedWorkflowState(schedule.workflowId)
|
||||
const nextRunAt = calculateNextRunTime(schedule, deployedData.blocks as any)
|
||||
await db
|
||||
.update(workflowSchedule)
|
||||
.set({
|
||||
updatedAt: now,
|
||||
nextRunAt: nextRetryAt,
|
||||
})
|
||||
.set({ updatedAt: now, nextRunAt })
|
||||
.where(eq(workflowSchedule.id, schedule.id))
|
||||
|
||||
logger.debug(`[${requestId}] Updated next retry time due to usage limits`)
|
||||
} catch (updateError) {
|
||||
logger.error(`[${requestId}] Error updating schedule for usage limits:`, updateError)
|
||||
} catch (calcErr) {
|
||||
logger.warn(
|
||||
`[${requestId}] Unable to calculate nextRunAt while skipping schedule ${schedule.id}`,
|
||||
calcErr
|
||||
)
|
||||
}
|
||||
|
||||
runningExecutions.delete(schedule.workflowId)
|
||||
continue
|
||||
}
|
||||
@@ -206,11 +210,25 @@ export async function GET() {
|
||||
const parallels = deployedData.parallels
|
||||
logger.info(`[${requestId}] Loaded deployed workflow ${schedule.workflowId}`)
|
||||
|
||||
// Validate that the schedule's trigger block exists in the deployed state
|
||||
if (schedule.blockId) {
|
||||
const blockExists = await blockExistsInDeployment(
|
||||
schedule.workflowId,
|
||||
schedule.blockId
|
||||
)
|
||||
if (!blockExists) {
|
||||
logger.warn(
|
||||
`[${requestId}] Schedule trigger block ${schedule.blockId} not found in deployed workflow ${schedule.workflowId}. Skipping execution.`
|
||||
)
|
||||
return { skip: true, blocks: {} as Record<string, BlockState> }
|
||||
}
|
||||
}
|
||||
|
||||
const mergedStates = mergeSubblockState(blocks)
|
||||
|
||||
// Retrieve environment variables with workspace precedence
|
||||
const { personalEncrypted, workspaceEncrypted } = await getPersonalAndWorkspaceEnv(
|
||||
workflowRecord.userId,
|
||||
actorUserId,
|
||||
workflowRecord.workspaceId || undefined
|
||||
)
|
||||
const variables = EnvVarsSchema.parse({
|
||||
@@ -355,7 +373,6 @@ export async function GET() {
|
||||
)
|
||||
|
||||
const input = {
|
||||
workflowId: schedule.workflowId,
|
||||
_context: {
|
||||
workflowId: schedule.workflowId,
|
||||
},
|
||||
@@ -363,7 +380,7 @@ export async function GET() {
|
||||
|
||||
// Start logging with environment variables
|
||||
await loggingSession.safeStart({
|
||||
userId: workflowRecord.userId,
|
||||
userId: actorUserId,
|
||||
workspaceId: workflowRecord.workspaceId || '',
|
||||
variables: variables || {},
|
||||
})
|
||||
@@ -407,7 +424,7 @@ export async function GET() {
|
||||
totalScheduledExecutions: sql`total_scheduled_executions + 1`,
|
||||
lastActive: now,
|
||||
})
|
||||
.where(eq(userStats.userId, workflowRecord.userId))
|
||||
.where(eq(userStats.userId, actorUserId))
|
||||
|
||||
logger.debug(`[${requestId}] Updated user stats for scheduled execution`)
|
||||
} catch (statsError) {
|
||||
@@ -446,6 +463,7 @@ export async function GET() {
|
||||
message: `Schedule execution failed before workflow started: ${earlyError.message}`,
|
||||
stackTrace: earlyError.stack,
|
||||
},
|
||||
traceSpans: [],
|
||||
})
|
||||
} catch (loggingError) {
|
||||
logger.error(
|
||||
@@ -459,6 +477,12 @@ export async function GET() {
|
||||
}
|
||||
})()
|
||||
|
||||
// Check if execution was skipped (e.g., trigger block not found)
|
||||
if ('skip' in executionSuccess && executionSuccess.skip) {
|
||||
runningExecutions.delete(schedule.workflowId)
|
||||
continue
|
||||
}
|
||||
|
||||
if (executionSuccess.success) {
|
||||
logger.info(`[${requestId}] Workflow ${schedule.workflowId} executed successfully`)
|
||||
|
||||
@@ -565,6 +589,7 @@ export async function GET() {
|
||||
message: `Schedule execution failed: ${error.message}`,
|
||||
stackTrace: error.stack,
|
||||
},
|
||||
traceSpans: [],
|
||||
})
|
||||
} catch (loggingError) {
|
||||
logger.error(
|
||||
|
||||
@@ -3,6 +3,7 @@ import { userStats, workflow } from '@sim/db/schema'
|
||||
import { eq, sql } from 'drizzle-orm'
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import OpenAI, { AzureOpenAI } from 'openai'
|
||||
import { checkAndBillOverageThreshold } from '@/lib/billing/threshold-billing'
|
||||
import { env } from '@/lib/env'
|
||||
import { getCostMultiplier, isBillingEnabled } from '@/lib/environment'
|
||||
import { createLogger } from '@/lib/logs/console/logger'
|
||||
@@ -133,6 +134,9 @@ async function updateUserStatsForWand(
|
||||
tokensUsed: totalTokens,
|
||||
costAdded: costToStore,
|
||||
})
|
||||
|
||||
// Check if user has hit overage threshold and bill incrementally
|
||||
await checkAndBillOverageThreshold(userId)
|
||||
} catch (error) {
|
||||
logger.error(`[${requestId}] Failed to update user stats for wand usage`, error)
|
||||
}
|
||||
|
||||
@@ -282,11 +282,13 @@ export async function DELETE(
|
||||
|
||||
if (!resolvedExternalId) {
|
||||
try {
|
||||
const requestOrigin = new URL(request.url).origin
|
||||
const effectiveOrigin = requestOrigin.includes('localhost')
|
||||
? env.NEXT_PUBLIC_APP_URL || requestOrigin
|
||||
: requestOrigin
|
||||
const expectedNotificationUrl = `${effectiveOrigin}/api/webhooks/trigger/${foundWebhook.path}`
|
||||
if (!env.NEXT_PUBLIC_APP_URL) {
|
||||
logger.error(
|
||||
`[${requestId}] NEXT_PUBLIC_APP_URL not configured, cannot match Airtable webhook`
|
||||
)
|
||||
throw new Error('NEXT_PUBLIC_APP_URL must be configured')
|
||||
}
|
||||
const expectedNotificationUrl = `${env.NEXT_PUBLIC_APP_URL}/api/webhooks/trigger/${foundWebhook.path}`
|
||||
|
||||
const listUrl = `https://api.airtable.com/v0/bases/${baseId}/webhooks`
|
||||
const listResp = await fetch(listUrl, {
|
||||
|
||||
@@ -64,13 +64,13 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{
|
||||
return NextResponse.json({ error: 'Forbidden' }, { status: 403 })
|
||||
}
|
||||
|
||||
const origin = new URL(request.url).origin
|
||||
const effectiveOrigin = origin.includes('localhost')
|
||||
? env.NEXT_PUBLIC_APP_URL || origin
|
||||
: origin
|
||||
if (!env.NEXT_PUBLIC_APP_URL) {
|
||||
logger.error(`[${requestId}] NEXT_PUBLIC_APP_URL not configured`)
|
||||
return NextResponse.json({ error: 'Server configuration error' }, { status: 500 })
|
||||
}
|
||||
|
||||
const token = await signTestWebhookToken(id, ttlSeconds)
|
||||
const url = `${effectiveOrigin}/api/webhooks/test/${id}?token=${encodeURIComponent(token)}`
|
||||
const url = `${env.NEXT_PUBLIC_APP_URL}/api/webhooks/test/${id}?token=${encodeURIComponent(token)}`
|
||||
|
||||
logger.info(`[${requestId}] Minted test URL for webhook ${id}`)
|
||||
return NextResponse.json({
|
||||
|
||||
@@ -432,25 +432,20 @@ async function createAirtableWebhookSubscription(
|
||||
logger.warn(
|
||||
`[${requestId}] Could not retrieve Airtable access token for user ${userId}. Cannot create webhook in Airtable.`
|
||||
)
|
||||
// Instead of silently returning, throw an error with clear user guidance
|
||||
throw new Error(
|
||||
'Airtable account connection required. Please connect your Airtable account in the trigger configuration and try again.'
|
||||
)
|
||||
}
|
||||
|
||||
const requestOrigin = new URL(request.url).origin
|
||||
// Ensure origin does not point to localhost for external API calls
|
||||
const effectiveOrigin = requestOrigin.includes('localhost')
|
||||
? env.NEXT_PUBLIC_APP_URL || requestOrigin // Use env var if available, fallback to original
|
||||
: requestOrigin
|
||||
|
||||
const notificationUrl = `${effectiveOrigin}/api/webhooks/trigger/${path}`
|
||||
if (effectiveOrigin !== requestOrigin) {
|
||||
logger.debug(
|
||||
`[${requestId}] Remapped localhost origin to ${effectiveOrigin} for notificationUrl`
|
||||
if (!env.NEXT_PUBLIC_APP_URL) {
|
||||
logger.error(
|
||||
`[${requestId}] NEXT_PUBLIC_APP_URL not configured, cannot register Airtable webhook`
|
||||
)
|
||||
throw new Error('NEXT_PUBLIC_APP_URL must be configured for Airtable webhook registration')
|
||||
}
|
||||
|
||||
const notificationUrl = `${env.NEXT_PUBLIC_APP_URL}/api/webhooks/trigger/${path}`
|
||||
|
||||
const airtableApiUrl = `https://api.airtable.com/v0/bases/${baseId}/webhooks`
|
||||
|
||||
const specification: any = {
|
||||
@@ -549,19 +544,15 @@ async function createTelegramWebhookSubscription(
|
||||
return // Cannot proceed without botToken
|
||||
}
|
||||
|
||||
const requestOrigin = new URL(request.url).origin
|
||||
// Ensure origin does not point to localhost for external API calls
|
||||
const effectiveOrigin = requestOrigin.includes('localhost')
|
||||
? env.NEXT_PUBLIC_APP_URL || requestOrigin // Use env var if available, fallback to original
|
||||
: requestOrigin
|
||||
|
||||
const notificationUrl = `${effectiveOrigin}/api/webhooks/trigger/${path}`
|
||||
if (effectiveOrigin !== requestOrigin) {
|
||||
logger.debug(
|
||||
`[${requestId}] Remapped localhost origin to ${effectiveOrigin} for notificationUrl`
|
||||
if (!env.NEXT_PUBLIC_APP_URL) {
|
||||
logger.error(
|
||||
`[${requestId}] NEXT_PUBLIC_APP_URL not configured, cannot register Telegram webhook`
|
||||
)
|
||||
throw new Error('NEXT_PUBLIC_APP_URL must be configured for Telegram webhook registration')
|
||||
}
|
||||
|
||||
const notificationUrl = `${env.NEXT_PUBLIC_APP_URL}/api/webhooks/trigger/${path}`
|
||||
|
||||
const telegramApiUrl = `https://api.telegram.org/bot${botToken}/setWebhook`
|
||||
|
||||
const requestBody: any = {
|
||||
|
||||
@@ -2,6 +2,7 @@ import { db } from '@sim/db'
|
||||
import { webhook } from '@sim/db/schema'
|
||||
import { eq } from 'drizzle-orm'
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { env } from '@/lib/env'
|
||||
import { createLogger } from '@/lib/logs/console/logger'
|
||||
import { generateRequestId } from '@/lib/utils'
|
||||
|
||||
@@ -13,7 +14,6 @@ export async function GET(request: NextRequest) {
|
||||
const requestId = generateRequestId()
|
||||
|
||||
try {
|
||||
// Get the webhook ID and provider from the query parameters
|
||||
const { searchParams } = new URL(request.url)
|
||||
const webhookId = searchParams.get('id')
|
||||
|
||||
@@ -24,7 +24,6 @@ export async function GET(request: NextRequest) {
|
||||
|
||||
logger.debug(`[${requestId}] Testing webhook with ID: ${webhookId}`)
|
||||
|
||||
// Find the webhook in the database
|
||||
const webhooks = await db.select().from(webhook).where(eq(webhook.id, webhookId)).limit(1)
|
||||
|
||||
if (webhooks.length === 0) {
|
||||
@@ -36,8 +35,14 @@ export async function GET(request: NextRequest) {
|
||||
const provider = foundWebhook.provider || 'generic'
|
||||
const providerConfig = (foundWebhook.providerConfig as Record<string, any>) || {}
|
||||
|
||||
// Construct the webhook URL
|
||||
const baseUrl = new URL(request.url).origin
|
||||
if (!env.NEXT_PUBLIC_APP_URL) {
|
||||
logger.error(`[${requestId}] NEXT_PUBLIC_APP_URL not configured, cannot test webhook`)
|
||||
return NextResponse.json(
|
||||
{ success: false, error: 'NEXT_PUBLIC_APP_URL must be configured' },
|
||||
{ status: 500 }
|
||||
)
|
||||
}
|
||||
const baseUrl = env.NEXT_PUBLIC_APP_URL
|
||||
const webhookUrl = `${baseUrl}/api/webhooks/trigger/${foundWebhook.path}`
|
||||
|
||||
logger.info(`[${requestId}] Testing webhook for provider: ${provider}`, {
|
||||
@@ -46,7 +51,6 @@ export async function GET(request: NextRequest) {
|
||||
isActive: foundWebhook.isActive,
|
||||
})
|
||||
|
||||
// Provider-specific test logic
|
||||
switch (provider) {
|
||||
case 'whatsapp': {
|
||||
const verificationToken = providerConfig.verificationToken
|
||||
@@ -59,10 +63,8 @@ export async function GET(request: NextRequest) {
|
||||
)
|
||||
}
|
||||
|
||||
// Generate a test challenge
const challenge = `test_${Date.now()}`

// Construct the WhatsApp verification URL
const whatsappUrl = `${webhookUrl}?hub.mode=subscribe&hub.verify_token=${verificationToken}&hub.challenge=${challenge}`

logger.debug(`[${requestId}] Testing WhatsApp webhook verification`, {
@@ -70,19 +72,16 @@
challenge,
})

// Make a request to the webhook endpoint
const response = await fetch(whatsappUrl, {
headers: {
'User-Agent': 'facebookplatform/1.0',
},
})

// Get the response details
const status = response.status
const contentType = response.headers.get('content-type')
const responseText = await response.text()

// Check if the test was successful
const success = status === 200 && responseText === challenge
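// --- Illustrative sketch (not part of the diff): what the trigger endpoint is expected
// to do for this check to pass. This is the standard Meta webhook verification handshake;
// the function name is made up, the query parameter names come from the URL built above.
function verifyWhatsAppSubscription(params: URLSearchParams, expectedToken: string): string | null {
  const mode = params.get('hub.mode')
  const token = params.get('hub.verify_token')
  const challenge = params.get('hub.challenge')
  // Echo the challenge back (HTTP 200, plain text) only when the token matches.
  return mode === 'subscribe' && token === expectedToken ? challenge : null
}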
|
||||
|
||||
if (success) {
|
||||
@@ -139,7 +138,6 @@ export async function GET(request: NextRequest) {
|
||||
)
|
||||
}
|
||||
|
||||
// Test the webhook endpoint with a simple message to check if it's reachable
|
||||
const testMessage = {
|
||||
update_id: 12345,
|
||||
message: {
|
||||
@@ -165,7 +163,6 @@ export async function GET(request: NextRequest) {
|
||||
url: webhookUrl,
|
||||
})
|
||||
|
||||
// Make a test request to the webhook endpoint
|
||||
const response = await fetch(webhookUrl, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
@@ -175,16 +172,12 @@ export async function GET(request: NextRequest) {
|
||||
body: JSON.stringify(testMessage),
|
||||
})
|
||||
|
||||
// Get the response details
|
||||
const status = response.status
|
||||
let responseText = ''
|
||||
try {
|
||||
responseText = await response.text()
|
||||
} catch (_e) {
|
||||
// Ignore if we can't get response text
|
||||
}
|
||||
} catch (_e) {}
|
||||
|
||||
// Consider success if we get a 2xx response
|
||||
const success = status >= 200 && status < 300
|
||||
|
||||
if (success) {
|
||||
@@ -196,7 +189,6 @@ export async function GET(request: NextRequest) {
|
||||
})
|
||||
}
|
||||
|
||||
// Get webhook info from Telegram API
|
||||
let webhookInfo = null
|
||||
try {
|
||||
const webhookInfoUrl = `https://api.telegram.org/bot${botToken}/getWebhookInfo`
|
||||
@@ -215,7 +207,6 @@ export async function GET(request: NextRequest) {
|
||||
logger.warn(`[${requestId}] Failed to get Telegram webhook info`, e)
|
||||
}
|
||||
|
||||
// Format the curl command for testing
|
||||
const curlCommand = [
|
||||
`curl -X POST "${webhookUrl}"`,
|
||||
`-H "Content-Type: application/json"`,
|
||||
@@ -288,16 +279,13 @@ export async function GET(request: NextRequest) {
|
||||
}
|
||||
|
||||
case 'generic': {
// Get the general webhook configuration
const token = providerConfig.token
const secretHeaderName = providerConfig.secretHeaderName
const requireAuth = providerConfig.requireAuth
const allowedIps = providerConfig.allowedIps

// Generate sample curl command for testing
let curlCommand = `curl -X POST "${webhookUrl}" -H "Content-Type: application/json"`

// Add auth headers to the curl command if required
if (requireAuth && token) {
if (secretHeaderName) {
curlCommand += ` -H "${secretHeaderName}: ${token}"`
@@ -306,7 +294,6 @@ export async function GET(request: NextRequest) {
}
}

// Add a sample payload
curlCommand += ` -d '{"event":"test_event","timestamp":"${new Date().toISOString()}"}'`
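// --- Illustrative sketch (not part of the diff): the assembled command for a webhook
// that requires auth via a custom secret header. Host, path, header name, and token are
// placeholders; the real command is emitted on a single line.
// curl -X POST "https://app.example.com/api/webhooks/trigger/my-path" \
//   -H "Content-Type: application/json" \
//   -H "X-Webhook-Secret: my-token" \
//   -d '{"event":"test_event","timestamp":"2025-01-01T00:00:00.000Z"}'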
|
||||
|
||||
logger.info(`[${requestId}] General webhook test successful: ${webhookId}`)
|
||||
@@ -391,7 +378,6 @@ export async function GET(request: NextRequest) {
|
||||
})
|
||||
}
|
||||
|
||||
// Add the Airtable test case
|
||||
case 'airtable': {
|
||||
const baseId = providerConfig.baseId
|
||||
const tableId = providerConfig.tableId
|
||||
@@ -408,7 +394,6 @@ export async function GET(request: NextRequest) {
|
||||
)
|
||||
}
|
||||
|
||||
// Define a sample payload structure
|
||||
const samplePayload = {
|
||||
webhook: {
|
||||
id: 'whiYOUR_WEBHOOK_ID',
|
||||
@@ -418,16 +403,15 @@ export async function GET(request: NextRequest) {
|
||||
},
|
||||
payloadFormat: 'v0',
|
||||
actionMetadata: {
|
||||
source: 'tableOrViewChange', // Example source
|
||||
source: 'tableOrViewChange',
|
||||
sourceMetadata: {},
|
||||
},
|
||||
payloads: [
|
||||
{
|
||||
timestamp: new Date().toISOString(),
|
||||
baseTransactionNumber: Date.now(), // Example transaction number
|
||||
baseTransactionNumber: Date.now(),
|
||||
changedTablesById: {
|
||||
[tableId]: {
|
||||
// Example changes - structure may vary based on actual event
|
||||
changedRecordsById: {
|
||||
recSAMPLEID1: {
|
||||
current: { cellValuesByFieldId: { fldSAMPLEID: 'New Value' } },
|
||||
@@ -442,7 +426,6 @@ export async function GET(request: NextRequest) {
|
||||
],
|
||||
}
|
||||
|
||||
// Generate sample curl command
|
||||
let curlCommand = `curl -X POST "${webhookUrl}" -H "Content-Type: application/json"`
|
||||
curlCommand += ` -d '${JSON.stringify(samplePayload, null, 2)}'`
|
||||
|
||||
@@ -519,7 +502,6 @@ export async function GET(request: NextRequest) {
|
||||
}
|
||||
|
||||
default: {
|
||||
// Generic webhook test
|
||||
logger.info(`[${requestId}] Generic webhook test successful: ${webhookId}`)
|
||||
return NextResponse.json({
|
||||
success: true,
|
||||
|
||||
@@ -106,6 +106,24 @@ describe('Webhook Trigger API Route', () => {
|
||||
mockExecutionDependencies()
|
||||
mockTriggerDevSdk()
|
||||
|
||||
globalMockData.workflows.push({
|
||||
id: 'test-workflow-id',
|
||||
userId: 'test-user-id',
|
||||
pinnedApiKeyId: 'test-pinned-api-key-id',
|
||||
})
|
||||
|
||||
vi.doMock('@/lib/api-key/service', async () => {
|
||||
const actual = await vi.importActual('@/lib/api-key/service')
|
||||
return {
|
||||
...(actual as Record<string, unknown>),
|
||||
getApiKeyOwnerUserId: vi
|
||||
.fn()
|
||||
.mockImplementation(async (pinnedApiKeyId: string | null | undefined) =>
|
||||
pinnedApiKeyId ? 'test-user-id' : null
|
||||
),
|
||||
}
|
||||
})
|
||||
|
||||
vi.doMock('@/services/queue', () => ({
|
||||
RateLimiter: vi.fn().mockImplementation(() => ({
|
||||
checkRateLimit: vi.fn().mockResolvedValue({
|
||||
@@ -222,6 +240,7 @@ describe('Webhook Trigger API Route', () => {
|
||||
globalMockData.workflows.push({
|
||||
id: 'test-workflow-id',
|
||||
userId: 'test-user-id',
|
||||
pinnedApiKeyId: 'test-pinned-api-key-id',
|
||||
})
|
||||
|
||||
const req = createMockRequest('POST', { event: 'test', id: 'test-123' })
|
||||
@@ -250,7 +269,11 @@ describe('Webhook Trigger API Route', () => {
|
||||
providerConfig: { requireAuth: true, token: 'test-token-123' },
|
||||
workflowId: 'test-workflow-id',
|
||||
})
|
||||
globalMockData.workflows.push({ id: 'test-workflow-id', userId: 'test-user-id' })
|
||||
globalMockData.workflows.push({
|
||||
id: 'test-workflow-id',
|
||||
userId: 'test-user-id',
|
||||
pinnedApiKeyId: 'test-pinned-api-key-id',
|
||||
})
|
||||
|
||||
const headers = {
|
||||
'Content-Type': 'application/json',
|
||||
@@ -281,7 +304,11 @@ describe('Webhook Trigger API Route', () => {
|
||||
},
|
||||
workflowId: 'test-workflow-id',
|
||||
})
|
||||
globalMockData.workflows.push({ id: 'test-workflow-id', userId: 'test-user-id' })
|
||||
globalMockData.workflows.push({
|
||||
id: 'test-workflow-id',
|
||||
userId: 'test-user-id',
|
||||
pinnedApiKeyId: 'test-pinned-api-key-id',
|
||||
})
|
||||
|
||||
const headers = {
|
||||
'Content-Type': 'application/json',
|
||||
@@ -308,7 +335,11 @@ describe('Webhook Trigger API Route', () => {
|
||||
providerConfig: { requireAuth: true, token: 'case-test-token' },
|
||||
workflowId: 'test-workflow-id',
|
||||
})
|
||||
globalMockData.workflows.push({ id: 'test-workflow-id', userId: 'test-user-id' })
|
||||
globalMockData.workflows.push({
|
||||
id: 'test-workflow-id',
|
||||
userId: 'test-user-id',
|
||||
pinnedApiKeyId: 'test-pinned-api-key-id',
|
||||
})
|
||||
|
||||
vi.doMock('@trigger.dev/sdk', () => ({
|
||||
tasks: {
|
||||
@@ -354,7 +385,11 @@ describe('Webhook Trigger API Route', () => {
|
||||
},
|
||||
workflowId: 'test-workflow-id',
|
||||
})
|
||||
globalMockData.workflows.push({ id: 'test-workflow-id', userId: 'test-user-id' })
|
||||
globalMockData.workflows.push({
|
||||
id: 'test-workflow-id',
|
||||
userId: 'test-user-id',
|
||||
pinnedApiKeyId: 'test-pinned-api-key-id',
|
||||
})
|
||||
|
||||
vi.doMock('@trigger.dev/sdk', () => ({
|
||||
tasks: {
|
||||
@@ -391,7 +426,6 @@ describe('Webhook Trigger API Route', () => {
|
||||
providerConfig: { requireAuth: true, token: 'correct-token' },
|
||||
workflowId: 'test-workflow-id',
|
||||
})
|
||||
globalMockData.workflows.push({ id: 'test-workflow-id', userId: 'test-user-id' })
|
||||
|
||||
const headers = {
|
||||
'Content-Type': 'application/json',
|
||||
@@ -424,7 +458,6 @@ describe('Webhook Trigger API Route', () => {
|
||||
},
|
||||
workflowId: 'test-workflow-id',
|
||||
})
|
||||
globalMockData.workflows.push({ id: 'test-workflow-id', userId: 'test-user-id' })
|
||||
|
||||
const headers = {
|
||||
'Content-Type': 'application/json',
|
||||
@@ -453,7 +486,6 @@ describe('Webhook Trigger API Route', () => {
|
||||
providerConfig: { requireAuth: true, token: 'required-token' },
|
||||
workflowId: 'test-workflow-id',
|
||||
})
|
||||
globalMockData.workflows.push({ id: 'test-workflow-id', userId: 'test-user-id' })
|
||||
|
||||
const req = createMockRequest('POST', { event: 'no.auth.test' })
|
||||
const params = Promise.resolve({ path: 'test-path' })
|
||||
@@ -482,7 +514,6 @@ describe('Webhook Trigger API Route', () => {
|
||||
},
|
||||
workflowId: 'test-workflow-id',
|
||||
})
|
||||
globalMockData.workflows.push({ id: 'test-workflow-id', userId: 'test-user-id' })
|
||||
|
||||
const headers = {
|
||||
'Content-Type': 'application/json',
|
||||
@@ -515,7 +546,6 @@ describe('Webhook Trigger API Route', () => {
|
||||
},
|
||||
workflowId: 'test-workflow-id',
|
||||
})
|
||||
globalMockData.workflows.push({ id: 'test-workflow-id', userId: 'test-user-id' })
|
||||
|
||||
const headers = {
|
||||
'Content-Type': 'application/json',
|
||||
|
||||
@@ -1,17 +1,14 @@
|
||||
import { db } from '@sim/db'
|
||||
import { workflow as workflowTable } from '@sim/db/schema'
|
||||
import { eq } from 'drizzle-orm'
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { z } from 'zod'
|
||||
import { getSession } from '@/lib/auth'
|
||||
import { createLogger } from '@/lib/logs/console/logger'
|
||||
import { getUserEntityPermissions } from '@/lib/permissions/utils'
|
||||
import { generateRequestId } from '@/lib/utils'
|
||||
import { applyAutoLayout } from '@/lib/workflows/autolayout'
|
||||
import {
|
||||
loadWorkflowFromNormalizedTables,
|
||||
type NormalizedWorkflowData,
|
||||
} from '@/lib/workflows/db-helpers'
|
||||
import { getWorkflowAccessContext } from '@/lib/workflows/utils'
|
||||
|
||||
export const dynamic = 'force-dynamic'
|
||||
|
||||
@@ -77,11 +74,8 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{
|
||||
})
|
||||
|
||||
// Fetch the workflow to check ownership/access
|
||||
const workflowData = await db
|
||||
.select()
|
||||
.from(workflowTable)
|
||||
.where(eq(workflowTable.id, workflowId))
|
||||
.then((rows) => rows[0])
|
||||
const accessContext = await getWorkflowAccessContext(workflowId, userId)
|
||||
const workflowData = accessContext?.workflow
|
||||
|
||||
if (!workflowData) {
|
||||
logger.warn(`[${requestId}] Workflow ${workflowId} not found for autolayout`)
|
||||
@@ -89,24 +83,12 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{
|
||||
}
|
||||
|
||||
// Check if user has permission to update this workflow
|
||||
let canUpdate = false
|
||||
|
||||
// Case 1: User owns the workflow
|
||||
if (workflowData.userId === userId) {
|
||||
canUpdate = true
|
||||
}
|
||||
|
||||
// Case 2: Workflow belongs to a workspace and user has write or admin permission
|
||||
if (!canUpdate && workflowData.workspaceId) {
|
||||
const userPermission = await getUserEntityPermissions(
|
||||
userId,
|
||||
'workspace',
|
||||
workflowData.workspaceId
|
||||
)
|
||||
if (userPermission === 'write' || userPermission === 'admin') {
|
||||
canUpdate = true
|
||||
}
|
||||
}
|
||||
const canUpdate =
|
||||
accessContext?.isOwner ||
|
||||
(workflowData.workspaceId
|
||||
? accessContext?.workspacePermission === 'write' ||
|
||||
accessContext?.workspacePermission === 'admin'
|
||||
: false)
|
||||
|
||||
if (!canUpdate) {
|
||||
logger.warn(
|
||||
|
||||
@@ -293,6 +293,13 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{
|
||||
}
|
||||
}
|
||||
|
||||
// Attribution: this route is UI-only; require session user as actor
|
||||
const actorUserId: string | null = session?.user?.id ?? null
|
||||
if (!actorUserId) {
|
||||
logger.warn(`[${requestId}] Unable to resolve actor user for workflow deployment: ${id}`)
|
||||
return createErrorResponse('Unable to determine deploying user', 400)
|
||||
}
|
||||
|
||||
await db.transaction(async (tx) => {
|
||||
const [{ maxVersion }] = await tx
|
||||
.select({ maxVersion: sql`COALESCE(MAX("version"), 0)` })
|
||||
@@ -318,7 +325,7 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{
|
||||
state: currentState,
|
||||
isActive: true,
|
||||
createdAt: deployedAt,
|
||||
createdBy: userId,
|
||||
createdBy: actorUserId,
|
||||
})
|
||||
|
||||
const updateData: Record<string, unknown> = {
|
||||
|
||||
@@ -47,17 +47,17 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
|
||||
// Duplicate workflow and all related data in a transaction
|
||||
const result = await db.transaction(async (tx) => {
|
||||
// First verify the source workflow exists
|
||||
const sourceWorkflow = await tx
|
||||
const sourceWorkflowRow = await tx
|
||||
.select()
|
||||
.from(workflow)
|
||||
.where(eq(workflow.id, sourceWorkflowId))
|
||||
.limit(1)
|
||||
|
||||
if (sourceWorkflow.length === 0) {
|
||||
if (sourceWorkflowRow.length === 0) {
|
||||
throw new Error('Source workflow not found')
|
||||
}
|
||||
|
||||
const source = sourceWorkflow[0]
|
||||
const source = sourceWorkflowRow[0]
|
||||
|
||||
// Check if user has permission to access the source workflow
|
||||
let canAccessSource = false
|
||||
|
||||
@@ -5,9 +5,11 @@ import { eq, sql } from 'drizzle-orm'
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { v4 as uuidv4 } from 'uuid'
|
||||
import { z } from 'zod'
|
||||
import { authenticateApiKeyFromHeader, updateApiKeyLastUsed } from '@/lib/api-key/service'
|
||||
import { getSession } from '@/lib/auth'
|
||||
import { checkServerSideUsageLimits } from '@/lib/billing'
|
||||
import { getHighestPrioritySubscription } from '@/lib/billing/core/subscription'
|
||||
import { env } from '@/lib/env'
|
||||
import { getPersonalAndWorkspaceEnv } from '@/lib/environment/utils'
|
||||
import { createLogger } from '@/lib/logs/console/logger'
|
||||
import { LoggingSession } from '@/lib/logs/execution/logging-session'
|
||||
@@ -21,8 +23,13 @@ import {
|
||||
workflowHasResponseBlock,
|
||||
} from '@/lib/workflows/utils'
|
||||
import { validateWorkflowAccess } from '@/app/api/workflows/middleware'
|
||||
import { createErrorResponse, createSuccessResponse } from '@/app/api/workflows/utils'
|
||||
import {
|
||||
createErrorResponse,
|
||||
createSuccessResponse,
|
||||
processApiWorkflowField,
|
||||
} from '@/app/api/workflows/utils'
|
||||
import { Executor } from '@/executor'
|
||||
import type { ExecutionResult } from '@/executor/types'
|
||||
import { Serializer } from '@/serializer'
|
||||
import { RateLimitError, RateLimiter, type TriggerType } from '@/services/queue'
|
||||
import { mergeSubblockState } from '@/stores/workflows/server-utils'
|
||||
@@ -32,15 +39,11 @@ const logger = createLogger('WorkflowExecuteAPI')
|
||||
export const dynamic = 'force-dynamic'
|
||||
export const runtime = 'nodejs'
|
||||
|
||||
// Define the schema for environment variables
|
||||
const EnvVarsSchema = z.record(z.string())
|
||||
|
||||
// Keep track of running executions to prevent duplicate requests
|
||||
// Use a combination of workflow ID and request ID to allow concurrent executions with different inputs
|
||||
const runningExecutions = new Set<string>()
|
||||
|
||||
// Utility function to filter out logs and workflowConnections from API response
|
||||
function createFilteredResult(result: any) {
|
||||
export function createFilteredResult(result: any) {
|
||||
return {
|
||||
...result,
|
||||
logs: undefined,
|
||||
@@ -53,7 +56,6 @@ function createFilteredResult(result: any) {
|
||||
}
|
||||
}
|
||||
|
||||
// Custom error class for usage limit exceeded
|
||||
class UsageLimitError extends Error {
|
||||
statusCode: number
|
||||
constructor(message: string, statusCode = 402) {
|
||||
@@ -62,20 +64,72 @@ class UsageLimitError extends Error {
|
||||
}
|
||||
}
|
||||
|
||||
async function executeWorkflow(
|
||||
/**
* Resolves output IDs to the internal blockId_attribute format
* Supports both:
* - User-facing format: blockName.path (e.g., "agent1.content")
* - Internal format: blockId_attribute (e.g., "uuid_content") - used by chat deployments
*/
function resolveOutputIds(
selectedOutputs: string[] | undefined,
blocks: Record<string, any>
): string[] | undefined {
if (!selectedOutputs || selectedOutputs.length === 0) {
return selectedOutputs
}

const UUID_REGEX = /^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/i

return selectedOutputs.map((outputId) => {
if (UUID_REGEX.test(outputId)) {
return outputId
}

const dotIndex = outputId.indexOf('.')
if (dotIndex === -1) {
logger.warn(`Invalid output ID format (missing dot): ${outputId}`)
return outputId
}

const blockName = outputId.substring(0, dotIndex)
const path = outputId.substring(dotIndex + 1)

const normalizedBlockName = blockName.toLowerCase().replace(/\s+/g, '')
const block = Object.values(blocks).find((b: any) => {
const normalized = (b.name || '').toLowerCase().replace(/\s+/g, '')
return normalized === normalizedBlockName
})

if (!block) {
logger.warn(`Block not found for name: ${blockName} (from output ID: ${outputId})`)
return outputId
}

const resolvedId = `${block.id}_${path}`
logger.debug(`Resolved output ID: ${outputId} -> ${resolvedId}`)
return resolvedId
})
}
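// --- Illustrative sketch (not part of the diff): resolving a user-facing output ID.
// The block id below is a made-up placeholder.
const exampleBlocks = {
  'b7e2a9d0-0000-0000-0000-000000000000': {
    id: 'b7e2a9d0-0000-0000-0000-000000000000',
    name: 'Agent 1',
  },
}
resolveOutputIds(['Agent 1.content'], exampleBlocks)
// -> ['b7e2a9d0-0000-0000-0000-000000000000_content']
// "Agent 1" matches because block names are lowercased and stripped of whitespace before
// comparison; IDs that already start with a UUID are passed through untouched.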
|
||||
|
||||
export async function executeWorkflow(
|
||||
workflow: any,
|
||||
requestId: string,
|
||||
input?: any,
|
||||
executingUserId?: string
|
||||
input: any | undefined,
|
||||
actorUserId: string,
|
||||
streamConfig?: {
|
||||
enabled: boolean
|
||||
selectedOutputs?: string[]
|
||||
isSecureMode?: boolean // When true, filter out all sensitive data
|
||||
workflowTriggerType?: 'api' | 'chat' // Which trigger block type to look for (default: 'api')
|
||||
onStream?: (streamingExec: any) => Promise<void> // Callback for streaming agent responses
|
||||
onBlockComplete?: (blockId: string, output: any) => Promise<void> // Callback when any block completes
|
||||
}
|
||||
): Promise<any> {
|
||||
const workflowId = workflow.id
|
||||
const executionId = uuidv4()
|
||||
|
||||
// Create a unique execution key combining workflow ID and request ID
|
||||
// This allows concurrent executions of the same workflow with different inputs
|
||||
const executionKey = `${workflowId}:${requestId}`
|
||||
|
||||
// Skip if this exact execution is already running (prevents duplicate requests)
|
||||
if (runningExecutions.has(executionKey)) {
|
||||
logger.warn(`[${requestId}] Execution is already running: ${executionKey}`)
|
||||
throw new Error('Execution is already running')
|
||||
@@ -83,10 +137,7 @@ async function executeWorkflow(
|
||||
|
||||
const loggingSession = new LoggingSession(workflowId, executionId, 'api', requestId)
|
||||
|
||||
// Rate limiting is now handled before entering the sync queue
|
||||
|
||||
// Check if the user has exceeded their usage limits
|
||||
const usageCheck = await checkServerSideUsageLimits(workflow.userId)
|
||||
const usageCheck = await checkServerSideUsageLimits(actorUserId)
|
||||
if (usageCheck.isExceeded) {
|
||||
logger.warn(`[${requestId}] User ${workflow.userId} has exceeded usage limits`, {
|
||||
currentUsage: usageCheck.currentUsage,
|
||||
@@ -97,13 +148,11 @@ async function executeWorkflow(
|
||||
)
|
||||
}
|
||||
|
||||
// Log input to help debug
|
||||
logger.info(
|
||||
`[${requestId}] Executing workflow with input:`,
|
||||
input ? JSON.stringify(input, null, 2) : 'No input provided'
|
||||
)
|
||||
|
||||
// Use input directly for API workflows
|
||||
const processedInput = input
|
||||
logger.info(
|
||||
`[${requestId}] Using input directly for workflow:`,
|
||||
@@ -114,10 +163,7 @@ async function executeWorkflow(
|
||||
runningExecutions.add(executionKey)
|
||||
logger.info(`[${requestId}] Starting workflow execution: ${workflowId}`)
|
||||
|
||||
// Load workflow data from deployed state for API executions
|
||||
const deployedData = await loadDeployedWorkflowState(workflowId)
|
||||
|
||||
// Use deployed data as primary source for API executions
|
||||
const { blocks, edges, loops, parallels } = deployedData
|
||||
logger.info(`[${requestId}] Using deployed state for workflow execution: ${workflowId}`)
|
||||
logger.debug(`[${requestId}] Deployed data loaded:`, {
|
||||
@@ -127,23 +173,20 @@ async function executeWorkflow(
parallelsCount: Object.keys(parallels || {}).length,
})

// Use the same execution flow as in scheduled executions
const mergedStates = mergeSubblockState(blocks)

// Load personal (for the executing user) and workspace env (workspace overrides personal)
const { personalEncrypted, workspaceEncrypted } = await getPersonalAndWorkspaceEnv(
executingUserId || workflow.userId,
actorUserId,
workflow.workspaceId || undefined
)
const variables = EnvVarsSchema.parse({ ...personalEncrypted, ...workspaceEncrypted })

await loggingSession.safeStart({
userId: executingUserId || workflow.userId,
userId: actorUserId,
workspaceId: workflow.workspaceId,
variables,
})

// Replace environment variables in the block states
const currentBlockStates = await Object.entries(mergedStates).reduce(
async (accPromise, [id, block]) => {
const acc = await accPromise
@@ -152,13 +195,11 @@ async function executeWorkflow(
const subAcc = await subAccPromise
let value = subBlock.value

// If the value is a string and contains environment variable syntax
if (typeof value === 'string' && value.includes('{{') && value.includes('}}')) {
const matches = value.match(/{{([^}]+)}}/g)
if (matches) {
// Process all matches sequentially
for (const match of matches) {
const varName = match.slice(2, -2) // Remove {{ and }}
const varName = match.slice(2, -2)
const encryptedValue = variables[varName]
if (!encryptedValue) {
throw new Error(`Environment variable "${varName}" was not found`)
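The `{{VAR}}` replacement loop above can be read as a small standalone helper. A sketch of that logic, assuming a `decryptSecret` helper that resolves an encrypted value to `{ decrypted }` (the exact decryption signature is an assumption):

```typescript
// Sketch of the {{VAR}} substitution applied to each sub-block value.
// `decryptSecret` stands in for the project's decryption helper (assumed signature).
async function resolveEnvTemplates(
  value: string,
  encryptedVars: Record<string, string>,
  decryptSecret: (encrypted: string) => Promise<{ decrypted: string }>
): Promise<string> {
  const matches = value.match(/{{([^}]+)}}/g)
  if (!matches) return value

  let resolved = value
  for (const match of matches) {
    const varName = match.slice(2, -2) // strip {{ and }}
    const encrypted = encryptedVars[varName]
    if (!encrypted) {
      throw new Error(`Environment variable "${varName}" was not found`)
    }
    const { decrypted } = await decryptSecret(encrypted)
    resolved = resolved.replace(match, decrypted)
  }
  return resolved
}
```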
@@ -190,7 +231,6 @@ async function executeWorkflow(
|
||||
Promise.resolve({} as Record<string, Record<string, any>>)
|
||||
)
|
||||
|
||||
// Create a map of decrypted environment variables
|
||||
const decryptedEnvVars: Record<string, string> = {}
|
||||
for (const [key, encryptedValue] of Object.entries(variables)) {
|
||||
try {
|
||||
@@ -202,22 +242,17 @@ async function executeWorkflow(
|
||||
}
|
||||
}
|
||||
|
||||
// Process the block states to ensure response formats are properly parsed
|
||||
const processedBlockStates = Object.entries(currentBlockStates).reduce(
|
||||
(acc, [blockId, blockState]) => {
|
||||
// Check if this block has a responseFormat that needs to be parsed
|
||||
if (blockState.responseFormat && typeof blockState.responseFormat === 'string') {
|
||||
const responseFormatValue = blockState.responseFormat.trim()
|
||||
|
||||
// Check for variable references like <start.input>
|
||||
if (responseFormatValue.startsWith('<') && responseFormatValue.includes('>')) {
|
||||
logger.debug(
|
||||
`[${requestId}] Response format contains variable reference for block ${blockId}`
|
||||
)
|
||||
// Keep variable references as-is - they will be resolved during execution
|
||||
acc[blockId] = blockState
|
||||
} else if (responseFormatValue === '') {
|
||||
// Empty string - remove response format
|
||||
acc[blockId] = {
|
||||
...blockState,
|
||||
responseFormat: undefined,
|
||||
@@ -225,7 +260,6 @@ async function executeWorkflow(
|
||||
} else {
|
||||
try {
|
||||
logger.debug(`[${requestId}] Parsing responseFormat for block ${blockId}`)
|
||||
// Attempt to parse the responseFormat if it's a string
|
||||
const parsedResponseFormat = JSON.parse(responseFormatValue)
|
||||
|
||||
acc[blockId] = {
|
||||
@@ -237,7 +271,6 @@ async function executeWorkflow(
|
||||
`[${requestId}] Failed to parse responseFormat for block ${blockId}, using undefined`,
|
||||
error
|
||||
)
|
||||
// Set to undefined instead of keeping malformed JSON - this allows execution to continue
|
||||
acc[blockId] = {
|
||||
...blockState,
|
||||
responseFormat: undefined,
|
||||
@@ -252,7 +285,6 @@ async function executeWorkflow(
|
||||
{} as Record<string, Record<string, any>>
|
||||
)
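The responseFormat normalization in the reduce above boils down to a small pure function. A sketch of the per-block decision, with the block-state shape simplified to just the string value:

```typescript
// Sketch of the responseFormat normalization applied per block state.
// Returns the value to keep: a variable reference verbatim, a parsed object, or undefined.
function normalizeResponseFormat(raw: string): string | object | undefined {
  const value = raw.trim()

  // Variable references like <start.input> are kept as-is and resolved during execution.
  if (value.startsWith('<') && value.includes('>')) {
    return value
  }

  // An empty string means "no response format".
  if (value === '') {
    return undefined
  }

  try {
    return JSON.parse(value)
  } catch {
    // Malformed JSON is dropped rather than failing the whole execution.
    return undefined
  }
}
```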
|
||||
|
||||
// Get workflow variables - they are stored as JSON objects in the database
|
||||
const workflowVariables = (workflow.variables as Record<string, any>) || {}
|
||||
|
||||
if (Object.keys(workflowVariables).length > 0) {
|
||||
@@ -263,32 +295,30 @@ async function executeWorkflow(
|
||||
logger.debug(`[${requestId}] No workflow variables found for: ${workflowId}`)
|
||||
}
|
||||
|
||||
// Serialize and execute the workflow
logger.debug(`[${requestId}] Serializing workflow: ${workflowId}`)
const serializedWorkflow = new Serializer().serializeWorkflow(
mergedStates,
edges,
loops,
parallels,
true // Enable validation during execution
true
)

// Determine API trigger start block
// Direct API execution ONLY works with API trigger blocks (or legacy starter in api/run mode)
const startBlock = TriggerUtils.findStartBlock(mergedStates, 'api', false) // isChildWorkflow = false
const preferredTriggerType = streamConfig?.workflowTriggerType || 'api'
const startBlock = TriggerUtils.findStartBlock(mergedStates, preferredTriggerType, false)

if (!startBlock) {
logger.error(`[${requestId}] No API trigger configured for this workflow`)
throw new Error(
'No API trigger configured for this workflow. Add an API Trigger block or use a Start block in API mode.'
)
const errorMsg =
preferredTriggerType === 'api'
? 'No API trigger block found. Add an API Trigger block to this workflow.'
: 'No chat trigger block found. Add a Chat Trigger block to this workflow.'
logger.error(`[${requestId}] ${errorMsg}`)
throw new Error(errorMsg)
}

const startBlockId = startBlock.blockId
const triggerBlock = startBlock.block

// Check if the API trigger has any outgoing connections (except for legacy starter blocks)
// Legacy starter blocks have their own validation in the executor
if (triggerBlock.type !== 'starter') {
const outgoingConnections = serializedWorkflow.connections.filter(
(conn) => conn.source === startBlockId
@@ -299,61 +329,74 @@ async function executeWorkflow(
}
}

const contextExtensions: any = {
executionId,
workspaceId: workflow.workspaceId,
isDeployedContext: true,
}

if (streamConfig?.enabled) {
contextExtensions.stream = true
contextExtensions.selectedOutputs = streamConfig.selectedOutputs || []
contextExtensions.edges = edges.map((e: any) => ({
source: e.source,
target: e.target,
}))
contextExtensions.onStream = streamConfig.onStream
contextExtensions.onBlockComplete = streamConfig.onBlockComplete
}

const executor = new Executor({
workflow: serializedWorkflow,
currentBlockStates: processedBlockStates,
envVarValues: decryptedEnvVars,
workflowInput: processedInput,
workflowVariables,
contextExtensions: {
executionId,
workspaceId: workflow.workspaceId,
isDeployedContext: true,
},
contextExtensions,
})

// Set up logging on the executor
loggingSession.setupExecutor(executor)
|
||||
|
||||
const result = await executor.execute(workflowId, startBlockId)
|
||||
|
||||
// Check if we got a StreamingExecution result (with stream + execution properties)
|
||||
// For API routes, we only care about the ExecutionResult part, not the stream
|
||||
const executionResult = 'stream' in result && 'execution' in result ? result.execution : result
|
||||
const result = (await executor.execute(workflowId, startBlockId)) as ExecutionResult
|
||||
|
||||
logger.info(`[${requestId}] Workflow execution completed: ${workflowId}`, {
|
||||
success: executionResult.success,
|
||||
executionTime: executionResult.metadata?.duration,
|
||||
success: result.success,
|
||||
executionTime: result.metadata?.duration,
|
||||
})
|
||||
|
||||
// Build trace spans from execution result (works for both success and failure)
|
||||
const { traceSpans, totalDuration } = buildTraceSpans(executionResult)
|
||||
const { traceSpans, totalDuration } = buildTraceSpans(result)
|
||||
|
||||
// Update workflow run counts if execution was successful
|
||||
if (executionResult.success) {
|
||||
if (result.success) {
|
||||
await updateWorkflowRunCounts(workflowId)
|
||||
|
||||
// Track API call in user stats
|
||||
await db
|
||||
.update(userStats)
|
||||
.set({
|
||||
totalApiCalls: sql`total_api_calls + 1`,
|
||||
lastActive: sql`now()`,
|
||||
})
|
||||
.where(eq(userStats.userId, workflow.userId))
|
||||
.where(eq(userStats.userId, actorUserId))
|
||||
}
|
||||
|
||||
await loggingSession.safeComplete({
|
||||
endedAt: new Date().toISOString(),
|
||||
totalDurationMs: totalDuration || 0,
|
||||
finalOutput: executionResult.output || {},
|
||||
finalOutput: result.output || {},
|
||||
traceSpans: (traceSpans || []) as any,
|
||||
workflowInput: processedInput,
|
||||
})
|
||||
|
||||
return executionResult
|
||||
return result
|
||||
} catch (error: any) {
|
||||
logger.error(`[${requestId}] Workflow execution failed: ${workflowId}`, error)
|
||||
|
||||
const executionResultForError = (error?.executionResult as ExecutionResult | undefined) || {
|
||||
success: false,
|
||||
output: {},
|
||||
logs: [],
|
||||
}
|
||||
const { traceSpans } = buildTraceSpans(executionResultForError)
|
||||
|
||||
await loggingSession.safeCompleteWithError({
|
||||
endedAt: new Date().toISOString(),
|
||||
totalDurationMs: 0,
|
||||
@@ -361,6 +404,7 @@ async function executeWorkflow(
|
||||
message: error.message || 'Workflow execution failed',
|
||||
stackTrace: error.stack,
|
||||
},
|
||||
traceSpans,
|
||||
})
|
||||
|
||||
throw error
|
||||
@@ -381,34 +425,38 @@ export async function GET(request: NextRequest, { params }: { params: Promise<{
|
||||
return createErrorResponse(validation.error.message, validation.error.status)
|
||||
}
|
||||
|
||||
// Determine trigger type based on authentication
|
||||
let triggerType: TriggerType = 'manual'
|
||||
const session = await getSession()
|
||||
if (!session?.user?.id) {
|
||||
// Check for API key
|
||||
const apiKeyHeader = request.headers.get('X-API-Key')
|
||||
if (apiKeyHeader) {
|
||||
triggerType = 'api'
|
||||
}
|
||||
}
|
||||
|
||||
// Note: Async execution is now handled in the POST handler below
|
||||
|
||||
// Synchronous execution
|
||||
try {
|
||||
// Check rate limits BEFORE entering queue for GET requests
|
||||
if (triggerType === 'api') {
|
||||
// Get user subscription (checks both personal and org subscriptions)
|
||||
const userSubscription = await getHighestPrioritySubscription(validation.workflow.userId)
|
||||
let actorUserId: string | null = null
|
||||
if (triggerType === 'manual') {
|
||||
actorUserId = session!.user!.id
|
||||
} else {
|
||||
const apiKeyHeader = request.headers.get('X-API-Key')
|
||||
const auth = apiKeyHeader ? await authenticateApiKeyFromHeader(apiKeyHeader) : null
|
||||
if (!auth?.success || !auth.userId) {
|
||||
return createErrorResponse('Unauthorized', 401)
|
||||
}
|
||||
actorUserId = auth.userId
|
||||
if (auth.keyId) {
|
||||
void updateApiKeyLastUsed(auth.keyId).catch(() => {})
|
||||
}
|
||||
|
||||
const userSubscription = await getHighestPrioritySubscription(actorUserId)
|
||||
const rateLimiter = new RateLimiter()
|
||||
const rateLimitCheck = await rateLimiter.checkRateLimitWithSubscription(
|
||||
validation.workflow.userId,
|
||||
actorUserId,
|
||||
userSubscription,
|
||||
triggerType,
|
||||
false // isAsync = false for sync calls
|
||||
'api',
|
||||
false
|
||||
)
|
||||
|
||||
if (!rateLimitCheck.allowed) {
|
||||
throw new RateLimitError(
|
||||
`Rate limit exceeded. You have ${rateLimitCheck.remaining} requests remaining. Resets at ${rateLimitCheck.resetAt.toISOString()}`
|
||||
@@ -420,17 +468,14 @@ export async function GET(request: NextRequest, { params }: { params: Promise<{
|
||||
validation.workflow,
|
||||
requestId,
|
||||
undefined,
|
||||
// Executing user (manual run): if session present, use that user for fallback
|
||||
(await getSession())?.user?.id || undefined
|
||||
actorUserId as string
|
||||
)
|
||||
|
||||
// Check if the workflow execution contains a response block output
|
||||
const hasResponseBlock = workflowHasResponseBlock(result)
|
||||
if (hasResponseBlock) {
|
||||
return createHttpResponseFromBlock(result)
|
||||
}
|
||||
|
||||
// Filter out logs and workflowConnections from the API response
|
||||
const filteredResult = createFilteredResult(result)
|
||||
return createSuccessResponse(filteredResult)
|
||||
} catch (error: any) {
|
||||
@@ -446,12 +491,10 @@ export async function GET(request: NextRequest, { params }: { params: Promise<{
|
||||
} catch (error: any) {
|
||||
logger.error(`[${requestId}] Error executing workflow: ${id}`, error)
|
||||
|
||||
// Check if this is a rate limit error
|
||||
if (error instanceof RateLimitError) {
|
||||
return createErrorResponse(error.message, error.statusCode, 'RATE_LIMIT_EXCEEDED')
|
||||
}
|
||||
|
||||
// Check if this is a usage limit error
|
||||
if (error instanceof UsageLimitError) {
|
||||
return createErrorResponse(error.message, error.statusCode, 'USAGE_LIMIT_EXCEEDED')
|
||||
}
|
||||
@@ -476,54 +519,147 @@ export async function POST(
|
||||
const workflowId = id
|
||||
|
||||
try {
|
||||
// Validate workflow access
|
||||
const validation = await validateWorkflowAccess(request as NextRequest, id)
|
||||
if (validation.error) {
|
||||
logger.warn(`[${requestId}] Workflow access validation failed: ${validation.error.message}`)
|
||||
return createErrorResponse(validation.error.message, validation.error.status)
|
||||
}
|
||||
|
||||
// Check execution mode from header
|
||||
const executionMode = request.headers.get('X-Execution-Mode')
|
||||
const isAsync = executionMode === 'async'
|
||||
|
||||
// Parse request body
|
||||
const body = await request.text()
|
||||
logger.info(`[${requestId}] ${body ? 'Request body provided' : 'No request body provided'}`)
|
||||
|
||||
let input = {}
|
||||
let parsedBody: any = {}
|
||||
if (body) {
|
||||
try {
|
||||
input = JSON.parse(body)
|
||||
parsedBody = JSON.parse(body)
|
||||
} catch (error) {
|
||||
logger.error(`[${requestId}] Failed to parse request body as JSON`, error)
|
||||
return createErrorResponse('Invalid JSON in request body', 400)
|
||||
}
|
||||
}
|
||||
|
||||
logger.info(`[${requestId}] Input passed to workflow:`, input)
logger.info(`[${requestId}] Input passed to workflow:`, parsedBody)

// Get authenticated user and determine trigger type
let authenticatedUserId: string | null = null
let triggerType: TriggerType = 'manual'
const extractExecutionParams = (req: NextRequest, body: any) => {
const internalSecret = req.headers.get('X-Internal-Secret')
const isInternalCall = internalSecret === env.INTERNAL_API_SECRET

const session = await getSession()
if (session?.user?.id) {
authenticatedUserId = session.user.id
triggerType = 'manual' // UI session (not rate limited)
} else {
const apiKeyHeader = request.headers.get('X-API-Key')
if (apiKeyHeader) {
authenticatedUserId = validation.workflow.userId
triggerType = 'api'
return {
isSecureMode: body.isSecureMode !== undefined ? body.isSecureMode : isInternalCall,
streamResponse: req.headers.get('X-Stream-Response') === 'true' || body.stream === true,
selectedOutputs:
body.selectedOutputs ||
(req.headers.get('X-Selected-Outputs')
? JSON.parse(req.headers.get('X-Selected-Outputs')!)
: undefined),
workflowTriggerType:
body.workflowTriggerType || (isInternalCall && body.stream ? 'chat' : 'api'),
input: body.input !== undefined ? body.input : body,
}
}
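For reference, a hedged example of what the parameter extraction above would yield for a streaming API-key call; the header and field names come from the diff, while the concrete request values are illustrative:

```typescript
// Illustrative request (values are made up):
//   POST /api/workflows/<id>/execute
//   X-API-Key: sk-...            -> API-key auth path, no session
//   X-Stream-Response: true      -> streamResponse === true
//   body: { input: { city: 'Berlin' }, selectedOutputs: ['agent1.content'] }
//
// extractExecutionParams would then return roughly:
const exampleParams = {
  isSecureMode: false, // no X-Internal-Secret header on an external call
  streamResponse: true, // from the X-Stream-Response header
  selectedOutputs: ['agent1.content'],
  workflowTriggerType: 'api', // not an internal chat call
  input: { city: 'Berlin' }, // body.input takes precedence over the raw body
}
```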
|
||||
if (!authenticatedUserId) {
|
||||
return createErrorResponse('Authentication required', 401)
|
||||
const {
|
||||
isSecureMode: finalIsSecureMode,
|
||||
streamResponse,
|
||||
selectedOutputs,
|
||||
workflowTriggerType,
|
||||
input: rawInput,
|
||||
} = extractExecutionParams(request as NextRequest, parsedBody)
|
||||
|
||||
let processedInput = rawInput
|
||||
logger.info(`[${requestId}] Raw input received:`, JSON.stringify(rawInput, null, 2))
|
||||
|
||||
try {
|
||||
const deployedData = await loadDeployedWorkflowState(workflowId)
|
||||
const blocks = deployedData.blocks || {}
|
||||
logger.info(`[${requestId}] Loaded ${Object.keys(blocks).length} blocks from workflow`)
|
||||
|
||||
const apiTriggerBlock = Object.values(blocks).find(
|
||||
(block: any) => block.type === 'api_trigger'
|
||||
) as any
|
||||
logger.info(`[${requestId}] API trigger block found:`, !!apiTriggerBlock)
|
||||
|
||||
if (apiTriggerBlock?.subBlocks?.inputFormat?.value) {
|
||||
const inputFormat = apiTriggerBlock.subBlocks.inputFormat.value as Array<{
|
||||
name: string
|
||||
type: 'string' | 'number' | 'boolean' | 'object' | 'array' | 'files'
|
||||
}>
|
||||
logger.info(
|
||||
`[${requestId}] Input format fields:`,
|
||||
inputFormat.map((f) => `${f.name}:${f.type}`).join(', ')
|
||||
)
|
||||
|
||||
const fileFields = inputFormat.filter((field) => field.type === 'files')
|
||||
logger.info(`[${requestId}] Found ${fileFields.length} file-type fields`)
|
||||
|
||||
if (fileFields.length > 0 && typeof rawInput === 'object' && rawInput !== null) {
|
||||
const executionContext = {
|
||||
workspaceId: validation.workflow.workspaceId,
|
||||
workflowId,
|
||||
}
|
||||
|
||||
for (const fileField of fileFields) {
|
||||
const fieldValue = rawInput[fileField.name]
|
||||
|
||||
if (fieldValue && typeof fieldValue === 'object') {
|
||||
const uploadedFiles = await processApiWorkflowField(
|
||||
fieldValue,
|
||||
executionContext,
|
||||
requestId
|
||||
)
|
||||
|
||||
if (uploadedFiles.length > 0) {
|
||||
processedInput = {
|
||||
...processedInput,
|
||||
[fileField.name]: uploadedFiles,
|
||||
}
|
||||
logger.info(
|
||||
`[${requestId}] Successfully processed ${uploadedFiles.length} file(s) for field: ${fileField.name}`
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
logger.error(`[${requestId}] Failed to process file uploads:`, error)
|
||||
const errorMessage = error instanceof Error ? error.message : 'Failed to process file uploads'
|
||||
return createErrorResponse(errorMessage, 400)
|
||||
}
|
||||
|
||||
const input = processedInput
|
||||
|
||||
let authenticatedUserId: string
|
||||
let triggerType: TriggerType = 'manual'
|
||||
|
||||
if (finalIsSecureMode) {
|
||||
authenticatedUserId = validation.workflow.userId
|
||||
triggerType = 'manual'
|
||||
} else {
|
||||
const session = await getSession()
|
||||
const apiKeyHeader = request.headers.get('X-API-Key')
|
||||
|
||||
if (session?.user?.id && !apiKeyHeader) {
|
||||
authenticatedUserId = session.user.id
|
||||
triggerType = 'manual'
|
||||
} else if (apiKeyHeader) {
|
||||
const auth = await authenticateApiKeyFromHeader(apiKeyHeader)
|
||||
if (!auth.success || !auth.userId) {
|
||||
return createErrorResponse('Unauthorized', 401)
|
||||
}
|
||||
authenticatedUserId = auth.userId
|
||||
triggerType = 'api'
|
||||
if (auth.keyId) {
|
||||
void updateApiKeyLastUsed(auth.keyId).catch(() => {})
|
||||
}
|
||||
} else {
|
||||
return createErrorResponse('Authentication required', 401)
|
||||
}
|
||||
}
|
||||
|
||||
// Get user subscription (checks both personal and org subscriptions)
|
||||
const userSubscription = await getHighestPrioritySubscription(authenticatedUserId)
|
||||
|
||||
if (isAsync) {
|
||||
@@ -533,7 +669,7 @@ export async function POST(
|
||||
authenticatedUserId,
|
||||
userSubscription,
|
||||
'api',
|
||||
true // isAsync = true
|
||||
true
|
||||
)
|
||||
|
||||
if (!rateLimitCheck.allowed) {
|
||||
@@ -557,7 +693,6 @@ export async function POST(
|
||||
)
|
||||
}
|
||||
|
||||
// Rate limit passed - always use Trigger.dev for async executions
|
||||
const handle = await tasks.trigger('workflow-execution', {
|
||||
workflowId,
|
||||
userId: authenticatedUserId,
|
||||
@@ -597,7 +732,7 @@ export async function POST(
|
||||
authenticatedUserId,
|
||||
userSubscription,
|
||||
triggerType,
|
||||
false // isAsync = false for sync calls
|
||||
false
|
||||
)
|
||||
|
||||
if (!rateLimitCheck.allowed) {
|
||||
@@ -606,11 +741,40 @@ export async function POST(
|
||||
)
|
||||
}
|
||||
|
||||
if (streamResponse) {
|
||||
const deployedData = await loadDeployedWorkflowState(workflowId)
|
||||
const resolvedSelectedOutputs = selectedOutputs
|
||||
? resolveOutputIds(selectedOutputs, deployedData.blocks || {})
|
||||
: selectedOutputs
|
||||
|
||||
const { createStreamingResponse } = await import('@/lib/workflows/streaming')
|
||||
const { SSE_HEADERS } = await import('@/lib/utils')
|
||||
|
||||
const stream = await createStreamingResponse({
|
||||
requestId,
|
||||
workflow: validation.workflow,
|
||||
input,
|
||||
executingUserId: authenticatedUserId,
|
||||
streamConfig: {
|
||||
selectedOutputs: resolvedSelectedOutputs,
|
||||
isSecureMode: finalIsSecureMode,
|
||||
workflowTriggerType,
|
||||
},
|
||||
createFilteredResult,
|
||||
})
|
||||
|
||||
return new NextResponse(stream, {
|
||||
status: 200,
|
||||
headers: SSE_HEADERS,
|
||||
})
|
||||
}
|
||||
|
||||
const result = await executeWorkflow(
|
||||
validation.workflow,
|
||||
requestId,
|
||||
input,
|
||||
authenticatedUserId
|
||||
authenticatedUserId,
|
||||
undefined
|
||||
)
|
||||
|
||||
const hasResponseBlock = workflowHasResponseBlock(result)
|
||||
@@ -618,7 +782,6 @@ export async function POST(
|
||||
return createHttpResponseFromBlock(result)
|
||||
}
|
||||
|
||||
// Filter out logs and workflowConnections from the API response
|
||||
const filteredResult = createFilteredResult(result)
|
||||
return createSuccessResponse(filteredResult)
|
||||
} catch (error: any) {
|
||||
@@ -634,17 +797,14 @@ export async function POST(
|
||||
} catch (error: any) {
|
||||
logger.error(`[${requestId}] Error executing workflow: ${workflowId}`, error)
|
||||
|
||||
// Check if this is a rate limit error
|
||||
if (error instanceof RateLimitError) {
|
||||
return createErrorResponse(error.message, error.statusCode, 'RATE_LIMIT_EXCEEDED')
|
||||
}
|
||||
|
||||
// Check if this is a usage limit error
|
||||
if (error instanceof UsageLimitError) {
|
||||
return createErrorResponse(error.message, error.statusCode, 'USAGE_LIMIT_EXCEEDED')
|
||||
}
|
||||
|
||||
// Check if this is a rate limit error (string match for backward compatibility)
|
||||
if (error.message?.includes('Rate limit exceeded')) {
|
||||
return createErrorResponse(error.message, 429, 'RATE_LIMIT_EXCEEDED')
|
||||
}
|
||||
|
||||
@@ -44,15 +44,17 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{
|
||||
variables: {},
|
||||
})
|
||||
|
||||
const { traceSpans } = buildTraceSpans(result)
|
||||
|
||||
if (result.success === false) {
|
||||
const message = result.error || 'Workflow execution failed'
|
||||
await loggingSession.safeCompleteWithError({
|
||||
endedAt: new Date().toISOString(),
|
||||
totalDurationMs: result.metadata?.duration || 0,
|
||||
error: { message },
|
||||
traceSpans,
|
||||
})
|
||||
} else {
|
||||
const { traceSpans } = buildTraceSpans(result)
|
||||
await loggingSession.safeComplete({
|
||||
endedAt: new Date().toISOString(),
|
||||
totalDurationMs: result.metadata?.duration || 0,
|
||||
|
||||
@@ -16,6 +16,9 @@ describe('Workflow By ID API Route', () => {
|
||||
error: vi.fn(),
|
||||
}
|
||||
|
||||
const mockGetWorkflowById = vi.fn()
|
||||
const mockGetWorkflowAccessContext = vi.fn()
|
||||
|
||||
beforeEach(() => {
|
||||
vi.resetModules()
|
||||
|
||||
@@ -30,6 +33,20 @@ describe('Workflow By ID API Route', () => {
|
||||
vi.doMock('@/lib/workflows/db-helpers', () => ({
|
||||
loadWorkflowFromNormalizedTables: vi.fn().mockResolvedValue(null),
|
||||
}))
|
||||
|
||||
mockGetWorkflowById.mockReset()
|
||||
mockGetWorkflowAccessContext.mockReset()
|
||||
|
||||
vi.doMock('@/lib/workflows/utils', async () => {
|
||||
const actual =
|
||||
await vi.importActual<typeof import('@/lib/workflows/utils')>('@/lib/workflows/utils')
|
||||
|
||||
return {
|
||||
...actual,
|
||||
getWorkflowById: mockGetWorkflowById,
|
||||
getWorkflowAccessContext: mockGetWorkflowAccessContext,
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
afterEach(() => {
|
||||
@@ -60,17 +77,14 @@ describe('Workflow By ID API Route', () => {
|
||||
}),
|
||||
}))
|
||||
|
||||
vi.doMock('@sim/db', () => ({
|
||||
db: {
|
||||
select: vi.fn().mockReturnValue({
|
||||
from: vi.fn().mockReturnValue({
|
||||
where: vi.fn().mockReturnValue({
|
||||
then: vi.fn().mockResolvedValue(undefined),
|
||||
}),
|
||||
}),
|
||||
}),
|
||||
},
|
||||
}))
|
||||
mockGetWorkflowById.mockResolvedValueOnce(null)
|
||||
mockGetWorkflowAccessContext.mockResolvedValueOnce({
|
||||
workflow: null,
|
||||
workspaceOwnerId: null,
|
||||
workspacePermission: null,
|
||||
isOwner: false,
|
||||
isWorkspaceOwner: false,
|
||||
})
|
||||
|
||||
const req = new NextRequest('http://localhost:3000/api/workflows/nonexistent')
|
||||
const params = Promise.resolve({ id: 'nonexistent' })
|
||||
@@ -105,22 +119,28 @@ describe('Workflow By ID API Route', () => {
|
||||
}),
|
||||
}))
|
||||
|
||||
vi.doMock('@sim/db', () => ({
|
||||
db: {
|
||||
select: vi.fn().mockReturnValue({
|
||||
from: vi.fn().mockReturnValue({
|
||||
where: vi.fn().mockReturnValue({
|
||||
then: vi.fn().mockResolvedValue(mockWorkflow),
|
||||
}),
|
||||
}),
|
||||
}),
|
||||
},
|
||||
}))
|
||||
mockGetWorkflowById.mockResolvedValueOnce(mockWorkflow)
|
||||
mockGetWorkflowAccessContext.mockResolvedValueOnce({
|
||||
workflow: mockWorkflow,
|
||||
workspaceOwnerId: null,
|
||||
workspacePermission: null,
|
||||
isOwner: true,
|
||||
isWorkspaceOwner: false,
|
||||
})
|
||||
|
||||
vi.doMock('@/lib/workflows/db-helpers', () => ({
|
||||
loadWorkflowFromNormalizedTables: vi.fn().mockResolvedValue(mockNormalizedData),
|
||||
}))
|
||||
|
||||
mockGetWorkflowById.mockResolvedValueOnce(mockWorkflow)
|
||||
mockGetWorkflowAccessContext.mockResolvedValueOnce({
|
||||
workflow: mockWorkflow,
|
||||
workspaceOwnerId: null,
|
||||
workspacePermission: null,
|
||||
isOwner: true,
|
||||
isWorkspaceOwner: false,
|
||||
})
|
||||
|
||||
const req = new NextRequest('http://localhost:3000/api/workflows/workflow-123')
|
||||
const params = Promise.resolve({ id: 'workflow-123' })
|
||||
|
||||
@@ -154,22 +174,28 @@ describe('Workflow By ID API Route', () => {
|
||||
}),
|
||||
}))
|
||||
|
||||
vi.doMock('@sim/db', () => ({
|
||||
db: {
|
||||
select: vi.fn().mockReturnValue({
|
||||
from: vi.fn().mockReturnValue({
|
||||
where: vi.fn().mockReturnValue({
|
||||
then: vi.fn().mockResolvedValue(mockWorkflow),
|
||||
}),
|
||||
}),
|
||||
}),
|
||||
},
|
||||
}))
|
||||
mockGetWorkflowById.mockResolvedValueOnce(mockWorkflow)
|
||||
mockGetWorkflowAccessContext.mockResolvedValueOnce({
|
||||
workflow: mockWorkflow,
|
||||
workspaceOwnerId: 'workspace-456',
|
||||
workspacePermission: 'admin',
|
||||
isOwner: false,
|
||||
isWorkspaceOwner: false,
|
||||
})
|
||||
|
||||
vi.doMock('@/lib/workflows/db-helpers', () => ({
|
||||
loadWorkflowFromNormalizedTables: vi.fn().mockResolvedValue(mockNormalizedData),
|
||||
}))
|
||||
|
||||
mockGetWorkflowById.mockResolvedValueOnce(mockWorkflow)
|
||||
mockGetWorkflowAccessContext.mockResolvedValueOnce({
|
||||
workflow: mockWorkflow,
|
||||
workspaceOwnerId: 'workspace-456',
|
||||
workspacePermission: 'read',
|
||||
isOwner: false,
|
||||
isWorkspaceOwner: false,
|
||||
})
|
||||
|
||||
vi.doMock('@/lib/permissions/utils', () => ({
|
||||
getUserEntityPermissions: vi.fn().mockResolvedValue('read'),
|
||||
hasAdminPermission: vi.fn().mockResolvedValue(false),
|
||||
@@ -200,22 +226,14 @@ describe('Workflow By ID API Route', () => {
|
||||
}),
|
||||
}))
|
||||
|
||||
vi.doMock('@sim/db', () => ({
|
||||
db: {
|
||||
select: vi.fn().mockReturnValue({
|
||||
from: vi.fn().mockReturnValue({
|
||||
where: vi.fn().mockReturnValue({
|
||||
then: vi.fn().mockResolvedValue(mockWorkflow),
|
||||
}),
|
||||
}),
|
||||
}),
|
||||
},
|
||||
}))
|
||||
|
||||
vi.doMock('@/lib/permissions/utils', () => ({
|
||||
getUserEntityPermissions: vi.fn().mockResolvedValue(null),
|
||||
hasAdminPermission: vi.fn().mockResolvedValue(false),
|
||||
}))
|
||||
mockGetWorkflowById.mockResolvedValueOnce(mockWorkflow)
|
||||
mockGetWorkflowAccessContext.mockResolvedValueOnce({
|
||||
workflow: mockWorkflow,
|
||||
workspaceOwnerId: 'workspace-456',
|
||||
workspacePermission: null,
|
||||
isOwner: false,
|
||||
isWorkspaceOwner: false,
|
||||
})
|
||||
|
||||
const req = new NextRequest('http://localhost:3000/api/workflows/workflow-123')
|
||||
const params = Promise.resolve({ id: 'workflow-123' })
|
||||
@@ -250,17 +268,14 @@ describe('Workflow By ID API Route', () => {
|
||||
}),
|
||||
}))
|
||||
|
||||
vi.doMock('@sim/db', () => ({
|
||||
db: {
|
||||
select: vi.fn().mockReturnValue({
|
||||
from: vi.fn().mockReturnValue({
|
||||
where: vi.fn().mockReturnValue({
|
||||
then: vi.fn().mockResolvedValue(mockWorkflow),
|
||||
}),
|
||||
}),
|
||||
}),
|
||||
},
|
||||
}))
|
||||
mockGetWorkflowById.mockResolvedValueOnce(mockWorkflow)
|
||||
mockGetWorkflowAccessContext.mockResolvedValueOnce({
|
||||
workflow: mockWorkflow,
|
||||
workspaceOwnerId: null,
|
||||
workspacePermission: null,
|
||||
isOwner: true,
|
||||
isWorkspaceOwner: false,
|
||||
})
|
||||
|
||||
vi.doMock('@/lib/workflows/db-helpers', () => ({
|
||||
loadWorkflowFromNormalizedTables: vi.fn().mockResolvedValue(mockNormalizedData),
|
||||
@@ -294,19 +309,22 @@ describe('Workflow By ID API Route', () => {
|
||||
}),
|
||||
}))
|
||||
|
||||
mockGetWorkflowById.mockResolvedValueOnce(mockWorkflow)
|
||||
mockGetWorkflowAccessContext.mockResolvedValueOnce({
|
||||
workflow: mockWorkflow,
|
||||
workspaceOwnerId: null,
|
||||
workspacePermission: null,
|
||||
isOwner: true,
|
||||
isWorkspaceOwner: false,
|
||||
})
|
||||
|
||||
vi.doMock('@sim/db', () => ({
|
||||
db: {
|
||||
select: vi.fn().mockReturnValue({
|
||||
from: vi.fn().mockReturnValue({
|
||||
where: vi.fn().mockReturnValue({
|
||||
then: vi.fn().mockResolvedValue(mockWorkflow),
|
||||
}),
|
||||
}),
|
||||
}),
|
||||
delete: vi.fn().mockReturnValue({
|
||||
where: vi.fn().mockResolvedValue(undefined),
|
||||
where: vi.fn().mockResolvedValue([{ id: 'workflow-123' }]),
|
||||
}),
|
||||
},
|
||||
workflow: {},
|
||||
}))
|
||||
|
||||
global.fetch = vi.fn().mockResolvedValue({
|
||||
@@ -340,24 +358,22 @@ describe('Workflow By ID API Route', () => {
|
||||
}),
|
||||
}))
|
||||
|
||||
mockGetWorkflowById.mockResolvedValueOnce(mockWorkflow)
|
||||
mockGetWorkflowAccessContext.mockResolvedValueOnce({
|
||||
workflow: mockWorkflow,
|
||||
workspaceOwnerId: 'workspace-456',
|
||||
workspacePermission: 'admin',
|
||||
isOwner: false,
|
||||
isWorkspaceOwner: false,
|
||||
})
|
||||
|
||||
vi.doMock('@sim/db', () => ({
|
||||
db: {
|
||||
select: vi.fn().mockReturnValue({
|
||||
from: vi.fn().mockReturnValue({
|
||||
where: vi.fn().mockReturnValue({
|
||||
then: vi.fn().mockResolvedValue(mockWorkflow),
|
||||
}),
|
||||
}),
|
||||
}),
|
||||
delete: vi.fn().mockReturnValue({
|
||||
where: vi.fn().mockResolvedValue(undefined),
|
||||
where: vi.fn().mockResolvedValue([{ id: 'workflow-123' }]),
|
||||
}),
|
||||
},
|
||||
}))
|
||||
|
||||
vi.doMock('@/lib/permissions/utils', () => ({
|
||||
getUserEntityPermissions: vi.fn().mockResolvedValue('admin'),
|
||||
hasAdminPermission: vi.fn().mockResolvedValue(true),
|
||||
workflow: {},
|
||||
}))
|
||||
|
||||
global.fetch = vi.fn().mockResolvedValue({
|
||||
@@ -391,22 +407,14 @@ describe('Workflow By ID API Route', () => {
|
||||
}),
|
||||
}))
|
||||
|
||||
vi.doMock('@sim/db', () => ({
|
||||
db: {
|
||||
select: vi.fn().mockReturnValue({
|
||||
from: vi.fn().mockReturnValue({
|
||||
where: vi.fn().mockReturnValue({
|
||||
then: vi.fn().mockResolvedValue(mockWorkflow),
|
||||
}),
|
||||
}),
|
||||
}),
|
||||
},
|
||||
}))
|
||||
|
||||
vi.doMock('@/lib/permissions/utils', () => ({
|
||||
getUserEntityPermissions: vi.fn().mockResolvedValue('read'),
|
||||
hasAdminPermission: vi.fn().mockResolvedValue(false),
|
||||
}))
|
||||
mockGetWorkflowById.mockResolvedValueOnce(mockWorkflow)
|
||||
mockGetWorkflowAccessContext.mockResolvedValueOnce({
|
||||
workflow: mockWorkflow,
|
||||
workspaceOwnerId: 'workspace-456',
|
||||
workspacePermission: null,
|
||||
isOwner: false,
|
||||
isWorkspaceOwner: false,
|
||||
})
|
||||
|
||||
const req = new NextRequest('http://localhost:3000/api/workflows/workflow-123', {
|
||||
method: 'DELETE',
|
||||
@@ -432,6 +440,7 @@ describe('Workflow By ID API Route', () => {
|
||||
}
|
||||
|
||||
const updateData = { name: 'Updated Workflow' }
|
||||
const updatedWorkflow = { ...mockWorkflow, ...updateData, updatedAt: new Date() }
|
||||
|
||||
vi.doMock('@/lib/auth', () => ({
|
||||
getSession: vi.fn().mockResolvedValue({
|
||||
@@ -439,23 +448,26 @@ describe('Workflow By ID API Route', () => {
|
||||
}),
|
||||
}))
|
||||
|
||||
mockGetWorkflowById.mockResolvedValueOnce(mockWorkflow)
|
||||
mockGetWorkflowAccessContext.mockResolvedValueOnce({
|
||||
workflow: mockWorkflow,
|
||||
workspaceOwnerId: null,
|
||||
workspacePermission: null,
|
||||
isOwner: true,
|
||||
isWorkspaceOwner: false,
|
||||
})
|
||||
|
||||
vi.doMock('@sim/db', () => ({
|
||||
db: {
|
||||
select: vi.fn().mockReturnValue({
|
||||
from: vi.fn().mockReturnValue({
|
||||
where: vi.fn().mockReturnValue({
|
||||
then: vi.fn().mockResolvedValue(mockWorkflow),
|
||||
}),
|
||||
}),
|
||||
}),
|
||||
update: vi.fn().mockReturnValue({
|
||||
set: vi.fn().mockReturnValue({
|
||||
where: vi.fn().mockReturnValue({
|
||||
returning: vi.fn().mockResolvedValue([{ ...mockWorkflow, ...updateData }]),
|
||||
returning: vi.fn().mockResolvedValue([updatedWorkflow]),
|
||||
}),
|
||||
}),
|
||||
}),
|
||||
},
|
||||
workflow: {},
|
||||
}))
|
||||
|
||||
const req = new NextRequest('http://localhost:3000/api/workflows/workflow-123', {
|
||||
@@ -481,6 +493,7 @@ describe('Workflow By ID API Route', () => {
|
||||
}
|
||||
|
||||
const updateData = { name: 'Updated Workflow' }
|
||||
const updatedWorkflow = { ...mockWorkflow, ...updateData, updatedAt: new Date() }
|
||||
|
||||
vi.doMock('@/lib/auth', () => ({
|
||||
getSession: vi.fn().mockResolvedValue({
|
||||
@@ -488,28 +501,26 @@ describe('Workflow By ID API Route', () => {
|
||||
}),
|
||||
}))
|
||||
|
||||
mockGetWorkflowById.mockResolvedValueOnce(mockWorkflow)
|
||||
mockGetWorkflowAccessContext.mockResolvedValueOnce({
|
||||
workflow: mockWorkflow,
|
||||
workspaceOwnerId: 'workspace-456',
|
||||
workspacePermission: 'write',
|
||||
isOwner: false,
|
||||
isWorkspaceOwner: false,
|
||||
})
|
||||
|
||||
vi.doMock('@sim/db', () => ({
|
||||
db: {
|
||||
select: vi.fn().mockReturnValue({
|
||||
from: vi.fn().mockReturnValue({
|
||||
where: vi.fn().mockReturnValue({
|
||||
then: vi.fn().mockResolvedValue(mockWorkflow),
|
||||
}),
|
||||
}),
|
||||
}),
|
||||
update: vi.fn().mockReturnValue({
|
||||
set: vi.fn().mockReturnValue({
|
||||
where: vi.fn().mockReturnValue({
|
||||
returning: vi.fn().mockResolvedValue([{ ...mockWorkflow, ...updateData }]),
|
||||
returning: vi.fn().mockResolvedValue([updatedWorkflow]),
|
||||
}),
|
||||
}),
|
||||
}),
|
||||
},
|
||||
}))
|
||||
|
||||
vi.doMock('@/lib/permissions/utils', () => ({
|
||||
getUserEntityPermissions: vi.fn().mockResolvedValue('write'),
|
||||
hasAdminPermission: vi.fn().mockResolvedValue(false),
|
||||
workflow: {},
|
||||
}))
|
||||
|
||||
const req = new NextRequest('http://localhost:3000/api/workflows/workflow-123', {
|
||||
@@ -542,22 +553,14 @@ describe('Workflow By ID API Route', () => {
|
||||
}),
|
||||
}))
|
||||
|
||||
vi.doMock('@sim/db', () => ({
|
||||
db: {
|
||||
select: vi.fn().mockReturnValue({
|
||||
from: vi.fn().mockReturnValue({
|
||||
where: vi.fn().mockReturnValue({
|
||||
then: vi.fn().mockResolvedValue(mockWorkflow),
|
||||
}),
|
||||
}),
|
||||
}),
|
||||
},
|
||||
}))
|
||||
|
||||
vi.doMock('@/lib/permissions/utils', () => ({
|
||||
getUserEntityPermissions: vi.fn().mockResolvedValue('read'),
|
||||
hasAdminPermission: vi.fn().mockResolvedValue(false),
|
||||
}))
|
||||
mockGetWorkflowById.mockResolvedValueOnce(mockWorkflow)
|
||||
mockGetWorkflowAccessContext.mockResolvedValueOnce({
|
||||
workflow: mockWorkflow,
|
||||
workspaceOwnerId: 'workspace-456',
|
||||
workspacePermission: 'read',
|
||||
isOwner: false,
|
||||
isWorkspaceOwner: false,
|
||||
})
|
||||
|
||||
const req = new NextRequest('http://localhost:3000/api/workflows/workflow-123', {
|
||||
method: 'PUT',
|
||||
@@ -587,17 +590,14 @@ describe('Workflow By ID API Route', () => {
|
||||
}),
|
||||
}))
|
||||
|
||||
vi.doMock('@sim/db', () => ({
|
||||
db: {
|
||||
select: vi.fn().mockReturnValue({
|
||||
from: vi.fn().mockReturnValue({
|
||||
where: vi.fn().mockReturnValue({
|
||||
then: vi.fn().mockResolvedValue(mockWorkflow),
|
||||
}),
|
||||
}),
|
||||
}),
|
||||
},
|
||||
}))
|
||||
mockGetWorkflowById.mockResolvedValueOnce(mockWorkflow)
|
||||
mockGetWorkflowAccessContext.mockResolvedValueOnce({
|
||||
workflow: mockWorkflow,
|
||||
workspaceOwnerId: null,
|
||||
workspacePermission: null,
|
||||
isOwner: true,
|
||||
isWorkspaceOwner: false,
|
||||
})
|
||||
|
||||
// Invalid data - empty name
|
||||
const invalidData = { name: '' }
|
||||
@@ -625,17 +625,7 @@ describe('Workflow By ID API Route', () => {
|
||||
}),
|
||||
}))
|
||||
|
||||
vi.doMock('@sim/db', () => ({
|
||||
db: {
|
||||
select: vi.fn().mockReturnValue({
|
||||
from: vi.fn().mockReturnValue({
|
||||
where: vi.fn().mockReturnValue({
|
||||
then: vi.fn().mockRejectedValue(new Error('Database connection timeout')),
|
||||
}),
|
||||
}),
|
||||
}),
|
||||
},
|
||||
}))
|
||||
mockGetWorkflowById.mockRejectedValueOnce(new Error('Database connection timeout'))
|
||||
|
||||
const req = new NextRequest('http://localhost:3000/api/workflows/workflow-123')
|
||||
const params = Promise.resolve({ id: 'workflow-123' })
|
||||
|
||||
@@ -8,9 +8,9 @@ import { getSession } from '@/lib/auth'
|
||||
import { verifyInternalToken } from '@/lib/auth/internal'
|
||||
import { env } from '@/lib/env'
|
||||
import { createLogger } from '@/lib/logs/console/logger'
|
||||
import { getUserEntityPermissions, hasAdminPermission } from '@/lib/permissions/utils'
|
||||
import { generateRequestId } from '@/lib/utils'
|
||||
import { loadWorkflowFromNormalizedTables } from '@/lib/workflows/db-helpers'
|
||||
import { getWorkflowAccessContext, getWorkflowById } from '@/lib/workflows/utils'
|
||||
|
||||
const logger = createLogger('WorkflowByIdAPI')
|
||||
|
||||
@@ -74,12 +74,8 @@ export async function GET(request: NextRequest, { params }: { params: Promise<{
|
||||
userId = authenticatedUserId
|
||||
}
|
||||
|
||||
// Fetch the workflow
|
||||
const workflowData = await db
|
||||
.select()
|
||||
.from(workflow)
|
||||
.where(eq(workflow.id, workflowId))
|
||||
.then((rows) => rows[0])
|
||||
let accessContext = null
|
||||
let workflowData = await getWorkflowById(workflowId)
|
||||
|
||||
if (!workflowData) {
|
||||
logger.warn(`[${requestId}] Workflow ${workflowId} not found`)
|
||||
@@ -94,18 +90,21 @@ export async function GET(request: NextRequest, { params }: { params: Promise<{
|
||||
hasAccess = true
|
||||
} else {
|
||||
// Case 1: User owns the workflow
|
||||
if (workflowData.userId === userId) {
|
||||
hasAccess = true
|
||||
}
|
||||
if (workflowData) {
|
||||
accessContext = await getWorkflowAccessContext(workflowId, userId ?? undefined)
|
||||
|
||||
// Case 2: Workflow belongs to a workspace the user has permissions for
|
||||
if (!hasAccess && workflowData.workspaceId && userId) {
|
||||
const userPermission = await getUserEntityPermissions(
|
||||
userId,
|
||||
'workspace',
|
||||
workflowData.workspaceId
|
||||
)
|
||||
if (userPermission !== null) {
|
||||
if (!accessContext) {
|
||||
logger.warn(`[${requestId}] Workflow ${workflowId} not found`)
|
||||
return NextResponse.json({ error: 'Workflow not found' }, { status: 404 })
|
||||
}
|
||||
|
||||
workflowData = accessContext.workflow
|
||||
|
||||
if (accessContext.isOwner) {
|
||||
hasAccess = true
|
||||
}
|
||||
|
||||
if (!hasAccess && workflowData.workspaceId && accessContext.workspacePermission) {
|
||||
hasAccess = true
|
||||
}
|
||||
}
|
||||
@@ -179,11 +178,8 @@ export async function DELETE(
|
||||
|
||||
const userId = session.user.id
|
||||
|
||||
const workflowData = await db
|
||||
.select()
|
||||
.from(workflow)
|
||||
.where(eq(workflow.id, workflowId))
|
||||
.then((rows) => rows[0])
|
||||
const accessContext = await getWorkflowAccessContext(workflowId, userId)
|
||||
const workflowData = accessContext?.workflow || (await getWorkflowById(workflowId))
|
||||
|
||||
if (!workflowData) {
|
||||
logger.warn(`[${requestId}] Workflow ${workflowId} not found for deletion`)
|
||||
@@ -200,8 +196,8 @@ export async function DELETE(
|
||||
|
||||
// Case 2: Workflow belongs to a workspace and user has admin permission
|
||||
if (!canDelete && workflowData.workspaceId) {
|
||||
const hasAdmin = await hasAdminPermission(userId, workflowData.workspaceId)
|
||||
if (hasAdmin) {
|
||||
const context = accessContext || (await getWorkflowAccessContext(workflowId, userId))
|
||||
if (context?.workspacePermission === 'admin') {
|
||||
canDelete = true
|
||||
}
|
||||
}
|
||||
@@ -320,11 +316,8 @@ export async function PUT(request: NextRequest, { params }: { params: Promise<{
|
||||
const updates = UpdateWorkflowSchema.parse(body)
|
||||
|
||||
// Fetch the workflow to check ownership/access
|
||||
const workflowData = await db
|
||||
.select()
|
||||
.from(workflow)
|
||||
.where(eq(workflow.id, workflowId))
|
||||
.then((rows) => rows[0])
|
||||
const accessContext = await getWorkflowAccessContext(workflowId, userId)
|
||||
const workflowData = accessContext?.workflow || (await getWorkflowById(workflowId))
|
||||
|
||||
if (!workflowData) {
|
||||
logger.warn(`[${requestId}] Workflow ${workflowId} not found for update`)
|
||||
@@ -341,12 +334,8 @@ export async function PUT(request: NextRequest, { params }: { params: Promise<{
|
||||
|
||||
// Case 2: Workflow belongs to a workspace and user has write or admin permission
|
||||
if (!canUpdate && workflowData.workspaceId) {
|
||||
const userPermission = await getUserEntityPermissions(
|
||||
userId,
|
||||
'workspace',
|
||||
workflowData.workspaceId
|
||||
)
|
||||
if (userPermission === 'write' || userPermission === 'admin') {
|
||||
const context = accessContext || (await getWorkflowAccessContext(workflowId, userId))
|
||||
if (context?.workspacePermission === 'write' || context?.workspacePermission === 'admin') {
|
||||
canUpdate = true
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,10 +5,10 @@ import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { z } from 'zod'
|
||||
import { getSession } from '@/lib/auth'
|
||||
import { createLogger } from '@/lib/logs/console/logger'
|
||||
import { getUserEntityPermissions } from '@/lib/permissions/utils'
|
||||
import { generateRequestId } from '@/lib/utils'
|
||||
import { extractAndPersistCustomTools } from '@/lib/workflows/custom-tools-persistence'
|
||||
import { saveWorkflowToNormalizedTables } from '@/lib/workflows/db-helpers'
|
||||
import { getWorkflowAccessContext } from '@/lib/workflows/utils'
|
||||
import { sanitizeAgentToolsInBlocks } from '@/lib/workflows/validation'
|
||||
|
||||
const logger = createLogger('WorkflowStateAPI')
|
||||
@@ -124,11 +124,8 @@ export async function PUT(request: NextRequest, { params }: { params: Promise<{
|
||||
const state = WorkflowStateSchema.parse(body)
|
||||
|
||||
// Fetch the workflow to check ownership/access
|
||||
const workflowData = await db
|
||||
.select()
|
||||
.from(workflow)
|
||||
.where(eq(workflow.id, workflowId))
|
||||
.then((rows) => rows[0])
|
||||
const accessContext = await getWorkflowAccessContext(workflowId, userId)
|
||||
const workflowData = accessContext?.workflow
|
||||
|
||||
if (!workflowData) {
|
||||
logger.warn(`[${requestId}] Workflow ${workflowId} not found for state update`)
|
||||
@@ -136,24 +133,12 @@ export async function PUT(request: NextRequest, { params }: { params: Promise<{
|
||||
}
|
||||
|
||||
// Check if user has permission to update this workflow
|
||||
let canUpdate = false
|
||||
|
||||
// Case 1: User owns the workflow
|
||||
if (workflowData.userId === userId) {
|
||||
canUpdate = true
|
||||
}
|
||||
|
||||
// Case 2: Workflow belongs to a workspace and user has write or admin permission
|
||||
if (!canUpdate && workflowData.workspaceId) {
|
||||
const userPermission = await getUserEntityPermissions(
|
||||
userId,
|
||||
'workspace',
|
||||
workflowData.workspaceId
|
||||
)
|
||||
if (userPermission === 'write' || userPermission === 'admin') {
|
||||
canUpdate = true
|
||||
}
|
||||
}
|
||||
const canUpdate =
|
||||
accessContext?.isOwner ||
|
||||
(workflowData.workspaceId
|
||||
? accessContext?.workspacePermission === 'write' ||
|
||||
accessContext?.workspacePermission === 'admin'
|
||||
: false)
|
||||
|
||||
if (!canUpdate) {
|
||||
logger.warn(
|
||||
|
||||
@@ -18,12 +18,18 @@ import {
|
||||
describe('Workflow Variables API Route', () => {
|
||||
let authMocks: ReturnType<typeof mockAuth>
|
||||
let databaseMocks: ReturnType<typeof createMockDatabase>
|
||||
const mockGetWorkflowAccessContext = vi.fn()
|
||||
|
||||
beforeEach(() => {
|
||||
vi.resetModules()
|
||||
setupCommonApiMocks()
|
||||
mockCryptoUuid('mock-request-id-12345678')
|
||||
authMocks = mockAuth(mockUser)
|
||||
mockGetWorkflowAccessContext.mockReset()
|
||||
|
||||
vi.doMock('@/lib/workflows/utils', () => ({
|
||||
getWorkflowAccessContext: mockGetWorkflowAccessContext,
|
||||
}))
|
||||
})
|
||||
|
||||
afterEach(() => {
|
||||
@@ -47,9 +53,7 @@ describe('Workflow Variables API Route', () => {
|
||||
|
||||
it('should return 404 when workflow does not exist', async () => {
|
||||
authMocks.setAuthenticated({ id: 'user-123', email: 'test@example.com' })
|
||||
databaseMocks = createMockDatabase({
|
||||
select: { results: [[]] }, // No workflow found
|
||||
})
|
||||
mockGetWorkflowAccessContext.mockResolvedValueOnce(null)
|
||||
|
||||
const req = new NextRequest('http://localhost:3000/api/workflows/nonexistent/variables')
|
||||
const params = Promise.resolve({ id: 'nonexistent' })
|
||||
@@ -73,8 +77,12 @@ describe('Workflow Variables API Route', () => {
|
||||
}
|
||||
|
||||
authMocks.setAuthenticated({ id: 'user-123', email: 'test@example.com' })
|
||||
databaseMocks = createMockDatabase({
|
||||
select: { results: [[mockWorkflow]] },
|
||||
mockGetWorkflowAccessContext.mockResolvedValueOnce({
|
||||
workflow: mockWorkflow,
|
||||
workspaceOwnerId: null,
|
||||
workspacePermission: null,
|
||||
isOwner: true,
|
||||
isWorkspaceOwner: false,
|
||||
})
|
||||
|
||||
const req = new NextRequest('http://localhost:3000/api/workflows/workflow-123/variables')
|
||||
@@ -99,14 +107,14 @@ describe('Workflow Variables API Route', () => {
|
||||
}
|
||||
|
||||
authMocks.setAuthenticated({ id: 'user-123', email: 'test@example.com' })
|
||||
databaseMocks = createMockDatabase({
|
||||
select: { results: [[mockWorkflow]] },
|
||||
mockGetWorkflowAccessContext.mockResolvedValueOnce({
|
||||
workflow: mockWorkflow,
|
||||
workspaceOwnerId: 'workspace-owner',
|
||||
workspacePermission: 'read',
|
||||
isOwner: false,
|
||||
isWorkspaceOwner: false,
|
||||
})
|
||||
|
||||
vi.doMock('@/lib/permissions/utils', () => ({
|
||||
getUserEntityPermissions: vi.fn().mockResolvedValue('read'),
|
||||
}))
|
||||
|
||||
const req = new NextRequest('http://localhost:3000/api/workflows/workflow-123/variables')
|
||||
const params = Promise.resolve({ id: 'workflow-123' })
|
||||
|
||||
@@ -116,14 +124,6 @@ describe('Workflow Variables API Route', () => {
|
||||
expect(response.status).toBe(200)
|
||||
const data = await response.json()
|
||||
expect(data.data).toEqual(mockWorkflow.variables)
|
||||
|
||||
// Verify permissions check was called
|
||||
const { getUserEntityPermissions } = await import('@/lib/permissions/utils')
|
||||
expect(getUserEntityPermissions).toHaveBeenCalledWith(
|
||||
'user-123',
|
||||
'workspace',
|
||||
'workspace-456'
|
||||
)
|
||||
})
|
||||
|
||||
it('should deny access when user has no workspace permissions', async () => {
|
||||
@@ -135,14 +135,14 @@ describe('Workflow Variables API Route', () => {
|
||||
}
|
||||
|
||||
authMocks.setAuthenticated({ id: 'user-123', email: 'test@example.com' })
|
||||
databaseMocks = createMockDatabase({
|
||||
select: { results: [[mockWorkflow]] },
|
||||
mockGetWorkflowAccessContext.mockResolvedValueOnce({
|
||||
workflow: mockWorkflow,
|
||||
workspaceOwnerId: 'workspace-owner',
|
||||
workspacePermission: null,
|
||||
isOwner: false,
|
||||
isWorkspaceOwner: false,
|
||||
})
|
||||
|
||||
vi.doMock('@/lib/permissions/utils', () => ({
|
||||
getUserEntityPermissions: vi.fn().mockResolvedValue(null),
|
||||
}))
|
||||
|
||||
const req = new NextRequest('http://localhost:3000/api/workflows/workflow-123/variables')
|
||||
const params = Promise.resolve({ id: 'workflow-123' })
|
||||
|
||||
@@ -165,8 +165,12 @@ describe('Workflow Variables API Route', () => {
|
||||
}
|
||||
|
||||
authMocks.setAuthenticated({ id: 'user-123', email: 'test@example.com' })
|
||||
databaseMocks = createMockDatabase({
|
||||
select: { results: [[mockWorkflow]] },
|
||||
mockGetWorkflowAccessContext.mockResolvedValueOnce({
|
||||
workflow: mockWorkflow,
|
||||
workspaceOwnerId: null,
|
||||
workspacePermission: null,
|
||||
isOwner: true,
|
||||
isWorkspaceOwner: false,
|
||||
})
|
||||
|
||||
const req = new NextRequest('http://localhost:3000/api/workflows/workflow-123/variables')
|
||||
@@ -191,8 +195,15 @@ describe('Workflow Variables API Route', () => {
|
||||
}
|
||||
|
||||
authMocks.setAuthenticated({ id: 'user-123', email: 'test@example.com' })
|
||||
mockGetWorkflowAccessContext.mockResolvedValueOnce({
|
||||
workflow: mockWorkflow,
|
||||
workspaceOwnerId: null,
|
||||
workspacePermission: null,
|
||||
isOwner: true,
|
||||
isWorkspaceOwner: false,
|
||||
})
|
||||
|
||||
databaseMocks = createMockDatabase({
|
||||
select: { results: [[mockWorkflow]] },
|
||||
update: { results: [{}] },
|
||||
})
|
||||
|
||||
@@ -223,14 +234,14 @@ describe('Workflow Variables API Route', () => {
|
||||
}
|
||||
|
||||
authMocks.setAuthenticated({ id: 'user-123', email: 'test@example.com' })
|
||||
databaseMocks = createMockDatabase({
|
||||
select: { results: [[mockWorkflow]] },
|
||||
mockGetWorkflowAccessContext.mockResolvedValueOnce({
|
||||
workflow: mockWorkflow,
|
||||
workspaceOwnerId: 'workspace-owner',
|
||||
workspacePermission: null,
|
||||
isOwner: false,
|
||||
isWorkspaceOwner: false,
|
||||
})
|
||||
|
||||
vi.doMock('@/lib/permissions/utils', () => ({
|
||||
getUserEntityPermissions: vi.fn().mockResolvedValue(null),
|
||||
}))
|
||||
|
||||
const variables = [
|
||||
{ id: 'var-1', workflowId: 'workflow-123', name: 'test', type: 'string', value: 'hello' },
|
||||
]
|
||||
@@ -258,8 +269,12 @@ describe('Workflow Variables API Route', () => {
|
||||
}
|
||||
|
||||
authMocks.setAuthenticated({ id: 'user-123', email: 'test@example.com' })
|
||||
databaseMocks = createMockDatabase({
|
||||
select: { results: [[mockWorkflow]] },
|
||||
mockGetWorkflowAccessContext.mockResolvedValueOnce({
|
||||
workflow: mockWorkflow,
|
||||
workspaceOwnerId: null,
|
||||
workspacePermission: null,
|
||||
isOwner: true,
|
||||
isWorkspaceOwner: false,
|
||||
})
|
||||
|
||||
// Invalid data - missing required fields
|
||||
@@ -283,9 +298,7 @@ describe('Workflow Variables API Route', () => {
|
||||
describe('Error handling', () => {
|
||||
it.concurrent('should handle database errors gracefully', async () => {
|
||||
authMocks.setAuthenticated({ id: 'user-123', email: 'test@example.com' })
|
||||
databaseMocks = createMockDatabase({
|
||||
select: { throwError: true, errorMessage: 'Database connection failed' },
|
||||
})
|
||||
mockGetWorkflowAccessContext.mockRejectedValueOnce(new Error('Database connection failed'))
|
||||
|
||||
const req = new NextRequest('http://localhost:3000/api/workflows/workflow-123/variables')
|
||||
const params = Promise.resolve({ id: 'workflow-123' })
|
||||
|
||||
@@ -5,8 +5,8 @@ import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { z } from 'zod'
|
||||
import { getSession } from '@/lib/auth'
|
||||
import { createLogger } from '@/lib/logs/console/logger'
|
||||
import { getUserEntityPermissions } from '@/lib/permissions/utils'
|
||||
import { generateRequestId } from '@/lib/utils'
|
||||
import { getWorkflowAccessContext } from '@/lib/workflows/utils'
|
||||
import type { Variable } from '@/stores/panel/variables/types'
|
||||
|
||||
const logger = createLogger('WorkflowVariablesAPI')
|
||||
@@ -35,32 +35,18 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
|
||||
}
|
||||
|
||||
// Get the workflow record
|
||||
const workflowRecord = await db
|
||||
.select()
|
||||
.from(workflow)
|
||||
.where(eq(workflow.id, workflowId))
|
||||
.limit(1)
|
||||
const accessContext = await getWorkflowAccessContext(workflowId, session.user.id)
|
||||
const workflowData = accessContext?.workflow
|
||||
|
||||
if (!workflowRecord.length) {
|
||||
if (!workflowData) {
|
||||
logger.warn(`[${requestId}] Workflow not found: ${workflowId}`)
|
||||
return NextResponse.json({ error: 'Workflow not found' }, { status: 404 })
|
||||
}
|
||||
|
||||
const workflowData = workflowRecord[0]
|
||||
const workspaceId = workflowData.workspaceId
|
||||
|
||||
// Check authorization - either the user owns the workflow or has workspace permissions
|
||||
let isAuthorized = workflowData.userId === session.user.id
|
||||
|
||||
// If not authorized by ownership and the workflow belongs to a workspace, check workspace permissions
|
||||
if (!isAuthorized && workspaceId) {
|
||||
const userPermission = await getUserEntityPermissions(
|
||||
session.user.id,
|
||||
'workspace',
|
||||
workspaceId
|
||||
)
|
||||
isAuthorized = userPermission !== null
|
||||
}
|
||||
const isAuthorized =
|
||||
accessContext?.isOwner || (workspaceId ? accessContext?.workspacePermission !== null : false)
|
||||
|
||||
if (!isAuthorized) {
|
||||
logger.warn(
|
||||
@@ -125,32 +111,18 @@ export async function GET(req: NextRequest, { params }: { params: Promise<{ id:
|
||||
}
|
||||
|
||||
// Get the workflow record
|
||||
const workflowRecord = await db
|
||||
.select()
|
||||
.from(workflow)
|
||||
.where(eq(workflow.id, workflowId))
|
||||
.limit(1)
|
||||
const accessContext = await getWorkflowAccessContext(workflowId, session.user.id)
|
||||
const workflowData = accessContext?.workflow
|
||||
|
||||
if (!workflowRecord.length) {
|
||||
if (!workflowData) {
|
||||
logger.warn(`[${requestId}] Workflow not found: ${workflowId}`)
|
||||
return NextResponse.json({ error: 'Workflow not found' }, { status: 404 })
|
||||
}
|
||||
|
||||
const workflowData = workflowRecord[0]
|
||||
const workspaceId = workflowData.workspaceId
|
||||
|
||||
// Check authorization - either the user owns the workflow or has workspace permissions
|
||||
let isAuthorized = workflowData.userId === session.user.id
|
||||
|
||||
// If not authorized by ownership and the workflow belongs to a workspace, check workspace permissions
|
||||
if (!isAuthorized && workspaceId) {
|
||||
const userPermission = await getUserEntityPermissions(
|
||||
session.user.id,
|
||||
'workspace',
|
||||
workspaceId
|
||||
)
|
||||
isAuthorized = userPermission !== null
|
||||
}
|
||||
const isAuthorized =
|
||||
accessContext?.isOwner || (workspaceId ? accessContext?.workspacePermission !== null : false)
|
||||
|
||||
if (!isAuthorized) {
|
||||
logger.warn(
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
import type { NextRequest } from 'next/server'
|
||||
import { authenticateApiKey } from '@/lib/api-key/auth'
|
||||
import { authenticateApiKeyFromHeader, updateApiKeyLastUsed } from '@/lib/api-key/service'
|
||||
import { env } from '@/lib/env'
|
||||
import { createLogger } from '@/lib/logs/console/logger'
|
||||
import { getWorkflowById } from '@/lib/workflows/utils'
|
||||
|
||||
@@ -37,7 +38,11 @@ export async function validateWorkflowAccess(
|
||||
}
|
||||
}
|
||||
|
||||
// API key authentication
|
||||
const internalSecret = request.headers.get('X-Internal-Secret')
|
||||
if (internalSecret === env.INTERNAL_API_SECRET) {
|
||||
return { workflow }
|
||||
}
|
||||
|
||||
let apiKeyHeader = null
|
||||
for (const [key, value] of request.headers.entries()) {
|
||||
if (key.toLowerCase() === 'x-api-key' && value) {
|
||||
|
||||
@@ -1,9 +1,14 @@
|
||||
import { NextResponse } from 'next/server'
|
||||
import { v4 as uuidv4 } from 'uuid'
|
||||
import { createLogger } from '@/lib/logs/console/logger'
|
||||
import { getUserEntityPermissions } from '@/lib/permissions/utils'
|
||||
import { uploadExecutionFile } from '@/lib/workflows/execution-file-storage'
|
||||
import type { UserFile } from '@/executor/types'
|
||||
|
||||
const logger = createLogger('WorkflowUtils')
|
||||
|
||||
const MAX_FILE_SIZE = 20 * 1024 * 1024 // 20MB
|
||||
|
||||
export function createErrorResponse(error: string, status: number, code?: string) {
|
||||
return NextResponse.json(
|
||||
{
|
||||
@@ -37,3 +42,99 @@ export async function verifyWorkspaceMembership(
|
||||
return null
|
||||
}
|
||||
}
|
||||
|
||||
/**
* Process API workflow files - handles both base64 ('file' type) and URL pass-through ('url' type)
*/
export async function processApiWorkflowFiles(
file: { type: string; data: string; name: string; mime?: string },
executionContext: { workspaceId: string; workflowId: string; executionId: string },
requestId: string
): Promise<UserFile | null> {
if (file.type === 'file' && file.data && file.name) {
const dataUrlPrefix = 'data:'
const base64Prefix = ';base64,'

if (!file.data.startsWith(dataUrlPrefix)) {
logger.warn(`[${requestId}] Invalid data format for file: ${file.name}`)
return null
}

const base64Index = file.data.indexOf(base64Prefix)
if (base64Index === -1) {
logger.warn(`[${requestId}] Invalid data format (no base64 marker) for file: ${file.name}`)
return null
}

const mimeType = file.data.substring(dataUrlPrefix.length, base64Index)
const base64Data = file.data.substring(base64Index + base64Prefix.length)
const buffer = Buffer.from(base64Data, 'base64')

if (buffer.length > MAX_FILE_SIZE) {
const fileSizeMB = (buffer.length / (1024 * 1024)).toFixed(2)
throw new Error(
`File "${file.name}" exceeds the maximum size limit of 20MB (actual size: ${fileSizeMB}MB)`
)
}

logger.debug(`[${requestId}] Uploading file: ${file.name} (${buffer.length} bytes)`)

const userFile = await uploadExecutionFile(
executionContext,
buffer,
file.name,
mimeType || file.mime || 'application/octet-stream'
)

logger.debug(`[${requestId}] Successfully uploaded ${file.name}`)
return userFile
}

if (file.type === 'url' && file.data) {
return {
id: uuidv4(),
url: file.data,
name: file.name,
size: 0,
type: file.mime || 'application/octet-stream',
key: `url/${file.name}`,
uploadedAt: new Date().toISOString(),
expiresAt: new Date(Date.now() + 7 * 24 * 60 * 60 * 1000).toISOString(),
}
}

return null
}
/**
|
||||
* Process all files for a given field in the API workflow input
|
||||
*/
|
||||
export async function processApiWorkflowField(
|
||||
fieldValue: any,
|
||||
executionContext: { workspaceId: string; workflowId: string },
|
||||
requestId: string
|
||||
): Promise<UserFile[]> {
|
||||
if (!fieldValue || typeof fieldValue !== 'object') {
|
||||
return []
|
||||
}
|
||||
|
||||
const files = Array.isArray(fieldValue) ? fieldValue : [fieldValue]
|
||||
const uploadedFiles: UserFile[] = []
|
||||
const executionId = uuidv4()
|
||||
const fullContext = { ...executionContext, executionId }
|
||||
|
||||
for (const file of files) {
|
||||
try {
|
||||
const userFile = await processApiWorkflowFiles(file, fullContext, requestId)
|
||||
|
||||
if (userFile) {
|
||||
uploadedFiles.push(userFile)
|
||||
}
|
||||
} catch (error) {
|
||||
logger.error(`[${requestId}] Failed to process file ${file.name}:`, error)
|
||||
throw new Error(`Failed to upload file: ${file.name}`)
|
||||
}
|
||||
}
|
||||
|
||||
return uploadedFiles
|
||||
}
|
||||
|
||||
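A minimal usage sketch for the two helpers above, assuming a hypothetical API route handler; the field name, IDs, and payload values are illustrative only, not taken from the diff:

// Hypothetical caller: build an execution context, then upload the files
// declared in one input field of the API request body.
const executionContext = { workspaceId: 'ws_123', workflowId: 'wf_456' }
const body = {
  documents: [
    { type: 'file', data: 'data:text/plain;base64,aGVsbG8=', name: 'hello.txt', mime: 'text/plain' },
    { type: 'url', data: 'https://example.com/report.pdf', name: 'report.pdf', mime: 'application/pdf' },
  ],
}
// Base64 'file' entries are decoded and uploaded to execution file storage;
// 'url' entries pass straight through as 7-day references without an upload.
const userFiles = await processApiWorkflowField(body.documents, executionContext, 'req-1')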
@@ -45,6 +45,18 @@ const DEFAULT_VOICE_SETTINGS = {
voiceId: 'EXAVITQu4vr4xnSDxMaL', // Default ElevenLabs voice (Bella)
}

/**
* Converts a File object to a base64 data URL
*/
function fileToBase64(file: File): Promise<string> {
return new Promise((resolve, reject) => {
const reader = new FileReader()
reader.onload = () => resolve(reader.result as string)
reader.onerror = reject
reader.readAsDataURL(file)
})
}

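// Illustrative sketch of how fileToBase64 can feed the JSON chat payload.
// The File value is invented and FileReader requires a browser environment.
async function buildFilePayload(attachment: File) {
  const dataUrl = await fileToBase64(attachment) // e.g. "data:text/plain;base64,aGVsbG8="
  return { name: attachment.name, size: attachment.size, type: attachment.type, dataUrl }
}
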
/**
|
||||
* Creates an audio stream handler for text-to-speech conversion
|
||||
* @param streamTextToAudio - Function to stream text to audio
|
||||
@@ -265,20 +277,43 @@ export default function ChatClient({ identifier }: { identifier: string }) {
|
||||
}
|
||||
|
||||
// Handle sending a message
|
||||
const handleSendMessage = async (messageParam?: string, isVoiceInput = false) => {
|
||||
const handleSendMessage = async (
|
||||
messageParam?: string,
|
||||
isVoiceInput = false,
|
||||
files?: Array<{
|
||||
id: string
|
||||
name: string
|
||||
size: number
|
||||
type: string
|
||||
file: File
|
||||
dataUrl?: string
|
||||
}>
|
||||
) => {
|
||||
const messageToSend = messageParam ?? inputValue
|
||||
if (!messageToSend.trim() || isLoading) return
|
||||
if ((!messageToSend.trim() && (!files || files.length === 0)) || isLoading) return
|
||||
|
||||
logger.info('Sending message:', { messageToSend, isVoiceInput, conversationId })
|
||||
logger.info('Sending message:', {
|
||||
messageToSend,
|
||||
isVoiceInput,
|
||||
conversationId,
|
||||
filesCount: files?.length,
|
||||
})
|
||||
|
||||
// Reset userHasScrolled when sending a new message
|
||||
setUserHasScrolled(false)
|
||||
|
||||
const userMessage: ChatMessage = {
|
||||
id: crypto.randomUUID(),
|
||||
content: messageToSend,
|
||||
content: messageToSend || (files && files.length > 0 ? `Sent ${files.length} file(s)` : ''),
|
||||
type: 'user',
|
||||
timestamp: new Date(),
|
||||
attachments: files?.map((file) => ({
|
||||
id: file.id,
|
||||
name: file.name,
|
||||
type: file.type,
|
||||
size: file.size,
|
||||
dataUrl: file.dataUrl || '',
|
||||
})),
|
||||
}
|
||||
|
||||
// Add the user's message to the chat
|
||||
@@ -299,7 +334,7 @@ export default function ChatClient({ identifier }: { identifier: string }) {
|
||||
|
||||
try {
|
||||
// Send structured payload to maintain chat context
|
||||
const payload = {
|
||||
const payload: any = {
|
||||
input:
|
||||
typeof userMessage.content === 'string'
|
||||
? userMessage.content
|
||||
@@ -307,7 +342,22 @@ export default function ChatClient({ identifier }: { identifier: string }) {
|
||||
conversationId,
|
||||
}
|
||||
|
||||
logger.info('API payload:', payload)
|
||||
// Add files if present (convert to base64 for JSON transmission)
|
||||
if (files && files.length > 0) {
|
||||
payload.files = await Promise.all(
|
||||
files.map(async (file) => ({
|
||||
name: file.name,
|
||||
size: file.size,
|
||||
type: file.type,
|
||||
dataUrl: file.dataUrl || (await fileToBase64(file.file)),
|
||||
}))
|
||||
)
|
||||
}
|
||||
|
||||
logger.info('API payload:', {
|
||||
...payload,
|
||||
files: payload.files ? `${payload.files.length} files` : undefined,
|
||||
})
|
||||
|
||||
const response = await fetch(`/api/chat/${identifier}`, {
|
||||
method: 'POST',
|
||||
@@ -499,8 +549,8 @@ export default function ChatClient({ identifier }: { identifier: string }) {
|
||||
<div className='relative p-3 pb-4 md:p-4 md:pb-6'>
|
||||
<div className='relative mx-auto max-w-3xl md:max-w-[748px]'>
|
||||
<ChatInput
|
||||
onSubmit={(value, isVoiceInput) => {
|
||||
void handleSendMessage(value, isVoiceInput)
|
||||
onSubmit={(value, isVoiceInput, files) => {
|
||||
void handleSendMessage(value, isVoiceInput, files)
|
||||
}}
|
||||
isStreaming={isStreamingResponse}
|
||||
onStopStreaming={() => stopStreaming(setMessages)}
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
import type React from 'react'
|
||||
import { useEffect, useRef, useState } from 'react'
|
||||
import { motion } from 'framer-motion'
|
||||
import { Send, Square } from 'lucide-react'
|
||||
import { AlertCircle, Paperclip, Send, Square, X } from 'lucide-react'
|
||||
import { Tooltip, TooltipContent, TooltipProvider, TooltipTrigger } from '@/components/ui/tooltip'
|
||||
import { VoiceInput } from '@/app/chat/components/input/voice-input'
|
||||
|
||||
@@ -12,8 +12,17 @@ const PLACEHOLDER_DESKTOP = 'Enter a message or click the mic to speak'
|
||||
const MAX_TEXTAREA_HEIGHT = 120 // Max height in pixels (e.g., for about 3-4 lines)
|
||||
const MAX_TEXTAREA_HEIGHT_MOBILE = 100 // Smaller for mobile
|
||||
|
||||
interface AttachedFile {
|
||||
id: string
|
||||
name: string
|
||||
size: number
|
||||
type: string
|
||||
file: File
|
||||
dataUrl?: string
|
||||
}
|
||||
|
||||
export const ChatInput: React.FC<{
|
||||
onSubmit?: (value: string, isVoiceInput?: boolean) => void
|
||||
onSubmit?: (value: string, isVoiceInput?: boolean, files?: AttachedFile[]) => void
|
||||
isStreaming?: boolean
|
||||
onStopStreaming?: () => void
|
||||
onVoiceStart?: () => void
|
||||
@@ -21,8 +30,11 @@ export const ChatInput: React.FC<{
|
||||
}> = ({ onSubmit, isStreaming = false, onStopStreaming, onVoiceStart, voiceOnly = false }) => {
|
||||
const wrapperRef = useRef<HTMLDivElement>(null)
|
||||
const textareaRef = useRef<HTMLTextAreaElement>(null) // Ref for the textarea
|
||||
const fileInputRef = useRef<HTMLInputElement>(null)
|
||||
const [isActive, setIsActive] = useState(false)
|
||||
const [inputValue, setInputValue] = useState('')
|
||||
const [attachedFiles, setAttachedFiles] = useState<AttachedFile[]>([])
|
||||
const [uploadErrors, setUploadErrors] = useState<string[]>([])
|
||||
|
||||
// Check if speech-to-text is available in the browser
|
||||
const isSttAvailable =
|
||||
@@ -85,10 +97,75 @@ export const ChatInput: React.FC<{
|
||||
// Focus is now handled by the useEffect above
|
||||
}
|
||||
|
||||
// Handle file selection
|
||||
const handleFileSelect = async (selectedFiles: FileList | null) => {
|
||||
if (!selectedFiles) return
|
||||
|
||||
const newFiles: AttachedFile[] = []
|
||||
const maxSize = 10 * 1024 * 1024 // 10MB limit
|
||||
const maxFiles = 5
|
||||
|
||||
for (let i = 0; i < selectedFiles.length; i++) {
|
||||
if (attachedFiles.length + newFiles.length >= maxFiles) break
|
||||
|
||||
const file = selectedFiles[i]
|
||||
|
||||
// Check file size
|
||||
if (file.size > maxSize) {
|
||||
setUploadErrors((prev) => [...prev, `${file.name} is too large (max 10MB)`])
|
||||
continue
|
||||
}
|
||||
|
||||
// Check for duplicates
|
||||
const isDuplicate = attachedFiles.some(
|
||||
(existingFile) => existingFile.name === file.name && existingFile.size === file.size
|
||||
)
|
||||
if (isDuplicate) {
|
||||
setUploadErrors((prev) => [...prev, `${file.name} already added`])
|
||||
continue
|
||||
}
|
||||
|
||||
// Read file as data URL if it's an image
|
||||
let dataUrl: string | undefined
|
||||
if (file.type.startsWith('image/')) {
|
||||
try {
|
||||
dataUrl = await new Promise<string>((resolve, reject) => {
|
||||
const reader = new FileReader()
|
||||
reader.onload = () => resolve(reader.result as string)
|
||||
reader.onerror = reject
|
||||
reader.readAsDataURL(file)
|
||||
})
|
||||
} catch (error) {
|
||||
console.error('Error reading file:', error)
|
||||
}
|
||||
}
|
||||
|
||||
newFiles.push({
|
||||
id: crypto.randomUUID(),
|
||||
name: file.name,
|
||||
size: file.size,
|
||||
type: file.type,
|
||||
file,
|
||||
dataUrl,
|
||||
})
|
||||
}
|
||||
|
||||
if (newFiles.length > 0) {
|
||||
setAttachedFiles([...attachedFiles, ...newFiles])
|
||||
setUploadErrors([]) // Clear errors when files are successfully added
|
||||
}
|
||||
}
|
||||
|
||||
const handleRemoveFile = (fileId: string) => {
|
||||
setAttachedFiles(attachedFiles.filter((f) => f.id !== fileId))
|
||||
}
|
||||
|
||||
const handleSubmit = () => {
|
||||
if (!inputValue.trim()) return
|
||||
onSubmit?.(inputValue.trim(), false) // false = not voice input
|
||||
if (!inputValue.trim() && attachedFiles.length === 0) return
|
||||
onSubmit?.(inputValue.trim(), false, attachedFiles) // false = not voice input
|
||||
setInputValue('')
|
||||
setAttachedFiles([])
|
||||
setUploadErrors([]) // Clear errors when sending message
|
||||
if (textareaRef.current) {
|
||||
textareaRef.current.style.height = 'auto' // Reset height after submit
|
||||
textareaRef.current.style.overflowY = 'hidden' // Ensure overflow is hidden
|
||||
@@ -132,6 +209,29 @@ export const ChatInput: React.FC<{
|
||||
<>
|
||||
<div className='fixed right-0 bottom-0 left-0 flex w-full items-center justify-center bg-gradient-to-t from-white to-transparent px-4 pb-4 text-black md:px-0 md:pb-4'>
|
||||
<div ref={wrapperRef} className='w-full max-w-3xl md:max-w-[748px]'>
|
||||
{/* Error Messages */}
|
||||
{uploadErrors.length > 0 && (
|
||||
<div className='mb-3'>
|
||||
<div className='rounded-lg border border-red-200 bg-red-50 p-3 dark:border-red-800/50 dark:bg-red-950/20'>
|
||||
<div className='flex items-start gap-2'>
|
||||
<AlertCircle className='mt-0.5 h-4 w-4 shrink-0 text-red-600 dark:text-red-400' />
|
||||
<div className='flex-1'>
|
||||
<div className='mb-1 font-medium text-red-800 text-sm dark:text-red-300'>
|
||||
File upload error
|
||||
</div>
|
||||
<div className='space-y-1'>
|
||||
{uploadErrors.map((error, idx) => (
|
||||
<div key={idx} className='text-red-700 text-sm dark:text-red-400'>
|
||||
{error}
|
||||
</div>
|
||||
))}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Text Input Area with Controls */}
|
||||
<motion.div
|
||||
className='rounded-2xl border border-gray-200 bg-white shadow-sm md:rounded-3xl'
|
||||
@@ -140,27 +240,99 @@ export const ChatInput: React.FC<{
|
||||
animate={{ opacity: 1, y: 0 }}
|
||||
transition={{ duration: 0.2 }}
|
||||
>
|
||||
<div className='flex items-center gap-2 p-3 md:p-4'>
|
||||
{/* Voice Input */}
|
||||
{isSttAvailable && (
|
||||
<TooltipProvider>
|
||||
<Tooltip>
|
||||
<TooltipTrigger asChild>
|
||||
<div>
|
||||
<VoiceInput
|
||||
onVoiceStart={handleVoiceStart}
|
||||
disabled={isStreaming}
|
||||
minimal
|
||||
{/* File Previews */}
|
||||
{attachedFiles.length > 0 && (
|
||||
<div className='mb-2 flex flex-wrap gap-2 px-3 pt-3 md:px-4'>
|
||||
{attachedFiles.map((file) => {
|
||||
const formatFileSize = (bytes: number) => {
|
||||
if (bytes === 0) return '0 B'
|
||||
const k = 1024
|
||||
const sizes = ['B', 'KB', 'MB', 'GB']
|
||||
const i = Math.floor(Math.log(bytes) / Math.log(k))
|
||||
return `${Math.round((bytes / k ** i) * 10) / 10} ${sizes[i]}`
|
||||
}
|
||||
|
||||
return (
|
||||
<div
|
||||
key={file.id}
|
||||
className={`group relative overflow-hidden rounded-lg border border-gray-200 bg-white dark:border-gray-700 dark:bg-gray-800 ${
|
||||
file.dataUrl
|
||||
? 'h-16 w-16 md:h-20 md:w-20'
|
||||
: 'flex h-16 min-w-[120px] max-w-[200px] items-center gap-2 px-2 md:h-20 md:min-w-[140px] md:max-w-[220px] md:px-3'
|
||||
}`}
|
||||
title=''
|
||||
>
|
||||
{file.dataUrl ? (
|
||||
<img
|
||||
src={file.dataUrl}
|
||||
alt={file.name}
|
||||
className='h-full w-full object-cover'
|
||||
/>
|
||||
</div>
|
||||
</TooltipTrigger>
|
||||
<TooltipContent side='top'>
|
||||
<p>Start voice conversation</p>
|
||||
<span className='text-gray-500 text-xs'>Click to enter voice mode</span>
|
||||
</TooltipContent>
|
||||
</Tooltip>
|
||||
</TooltipProvider>
|
||||
)}
|
||||
) : (
|
||||
<>
|
||||
<div className='flex h-8 w-8 flex-shrink-0 items-center justify-center rounded bg-gray-100 md:h-10 md:w-10 dark:bg-gray-700'>
|
||||
<Paperclip
|
||||
size={16}
|
||||
className='text-gray-500 md:h-5 md:w-5 dark:text-gray-400'
|
||||
/>
|
||||
</div>
|
||||
<div className='min-w-0 flex-1'>
|
||||
<div className='truncate font-medium text-gray-800 text-xs dark:text-gray-200'>
|
||||
{file.name}
|
||||
</div>
|
||||
<div className='text-[10px] text-gray-500 dark:text-gray-400'>
|
||||
{formatFileSize(file.size)}
|
||||
</div>
|
||||
</div>
|
||||
</>
|
||||
)}
|
||||
<button
|
||||
type='button'
|
||||
onClick={() => handleRemoveFile(file.id)}
|
||||
className='absolute top-1 right-1 rounded-full bg-gray-800/80 p-1 text-white opacity-0 transition-opacity hover:bg-gray-800/80 hover:text-white group-hover:opacity-100 dark:bg-black/70 dark:hover:bg-black/70 dark:hover:text-white'
|
||||
>
|
||||
<X size={12} />
|
||||
</button>
|
||||
</div>
|
||||
)
|
||||
})}
|
||||
</div>
|
||||
)}
|
||||
|
||||
<div className='flex items-center gap-2 p-3 md:p-4'>
|
||||
{/* Paperclip Button */}
|
||||
<TooltipProvider>
|
||||
<Tooltip>
|
||||
<TooltipTrigger asChild>
|
||||
<button
|
||||
type='button'
|
||||
onClick={() => fileInputRef.current?.click()}
|
||||
disabled={isStreaming || attachedFiles.length >= 5}
|
||||
className='flex items-center justify-center rounded-full p-1.5 text-gray-600 transition-colors hover:bg-gray-100 disabled:cursor-not-allowed disabled:opacity-50 md:p-2'
|
||||
>
|
||||
<Paperclip size={16} className='md:h-5 md:w-5' />
|
||||
</button>
|
||||
</TooltipTrigger>
|
||||
<TooltipContent side='top'>
|
||||
<p>Attach files</p>
|
||||
</TooltipContent>
|
||||
</Tooltip>
|
||||
</TooltipProvider>
|
||||
|
||||
{/* Hidden file input */}
|
||||
<input
|
||||
ref={fileInputRef}
|
||||
type='file'
|
||||
multiple
|
||||
onChange={(e) => {
|
||||
handleFileSelect(e.target.files)
|
||||
if (fileInputRef.current) {
|
||||
fileInputRef.current.value = ''
|
||||
}
|
||||
}}
|
||||
className='hidden'
|
||||
disabled={isStreaming}
|
||||
/>
|
||||
|
||||
{/* Text Input Container */}
|
||||
<div className='relative flex-1'>
|
||||
@@ -208,10 +380,30 @@ export const ChatInput: React.FC<{
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Voice Input */}
|
||||
{isSttAvailable && (
|
||||
<TooltipProvider>
|
||||
<Tooltip>
|
||||
<TooltipTrigger asChild>
|
||||
<div>
|
||||
<VoiceInput
|
||||
onVoiceStart={handleVoiceStart}
|
||||
disabled={isStreaming}
|
||||
minimal
|
||||
/>
|
||||
</div>
|
||||
</TooltipTrigger>
|
||||
<TooltipContent side='top'>
|
||||
<p>Start voice conversation</p>
|
||||
</TooltipContent>
|
||||
</Tooltip>
|
||||
</TooltipProvider>
|
||||
)}
|
||||
|
||||
{/* Send Button */}
|
||||
<button
|
||||
className={`flex items-center justify-center rounded-full p-1.5 text-white transition-colors md:p-2 ${
|
||||
inputValue.trim()
|
||||
inputValue.trim() || attachedFiles.length > 0
|
||||
? 'bg-black hover:bg-zinc-700'
|
||||
: 'cursor-default bg-gray-300 hover:bg-gray-400'
|
||||
}`}
|
||||
|
||||
@@ -72,19 +72,17 @@ export function VoiceInput({
|
||||
|
||||
if (minimal) {
|
||||
return (
|
||||
<motion.button
|
||||
<button
|
||||
type='button'
|
||||
onClick={handleVoiceClick}
|
||||
disabled={disabled}
|
||||
className={`flex items-center justify-center p-1 transition-colors duration-200 ${
|
||||
disabled ? 'cursor-not-allowed opacity-50' : 'cursor-pointer hover:text-gray-600'
|
||||
className={`flex items-center justify-center rounded-full p-1.5 text-gray-600 transition-colors duration-200 hover:bg-gray-100 md:p-2 ${
|
||||
disabled ? 'cursor-not-allowed opacity-50' : 'cursor-pointer'
|
||||
}`}
|
||||
whileHover={{ scale: 1.05 }}
|
||||
whileTap={{ scale: 0.95 }}
|
||||
title='Start voice conversation'
|
||||
>
|
||||
<Mic size={18} className='text-gray-500' />
|
||||
</motion.button>
|
||||
<Mic size={16} className='md:h-5 md:w-5' />
|
||||
</button>
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
@@ -1,10 +1,18 @@
|
||||
'use client'
|
||||
|
||||
import { memo, useMemo, useState } from 'react'
|
||||
import { Check, Copy } from 'lucide-react'
|
||||
import { Check, Copy, File as FileIcon, FileText, Image as ImageIcon } from 'lucide-react'
|
||||
import { Tooltip, TooltipContent, TooltipProvider, TooltipTrigger } from '@/components/ui/tooltip'
|
||||
import MarkdownRenderer from './components/markdown-renderer'
|
||||
|
||||
export interface ChatAttachment {
|
||||
id: string
|
||||
name: string
|
||||
type: string
|
||||
dataUrl: string
|
||||
size?: number
|
||||
}
|
||||
|
||||
export interface ChatMessage {
|
||||
id: string
|
||||
content: string | Record<string, unknown>
|
||||
@@ -12,6 +20,7 @@ export interface ChatMessage {
|
||||
timestamp: Date
|
||||
isInitialMessage?: boolean
|
||||
isStreaming?: boolean
|
||||
attachments?: ChatAttachment[]
|
||||
}
|
||||
|
||||
function EnhancedMarkdownRenderer({ content }: { content: string }) {
|
||||
@@ -39,15 +48,96 @@ export const ClientChatMessage = memo(
|
||||
return (
|
||||
<div className='px-4 py-5' data-message-id={message.id}>
|
||||
<div className='mx-auto max-w-3xl'>
|
||||
{/* File attachments displayed above the message */}
|
||||
{message.attachments && message.attachments.length > 0 && (
|
||||
<div className='mb-2 flex justify-end'>
|
||||
<div className='flex flex-wrap gap-2'>
|
||||
{message.attachments.map((attachment) => {
|
||||
const isImage = attachment.type.startsWith('image/')
|
||||
const getFileIcon = (type: string) => {
|
||||
if (type.includes('pdf'))
|
||||
return (
|
||||
<FileText className='h-5 w-5 text-gray-500 md:h-6 md:w-6 dark:text-gray-400' />
|
||||
)
|
||||
if (type.startsWith('image/'))
|
||||
return (
|
||||
<ImageIcon className='h-5 w-5 text-gray-500 md:h-6 md:w-6 dark:text-gray-400' />
|
||||
)
|
||||
if (type.includes('text') || type.includes('json'))
|
||||
return (
|
||||
<FileText className='h-5 w-5 text-gray-500 md:h-6 md:w-6 dark:text-gray-400' />
|
||||
)
|
||||
return (
|
||||
<FileIcon className='h-5 w-5 text-gray-500 md:h-6 md:w-6 dark:text-gray-400' />
|
||||
)
|
||||
}
|
||||
const formatFileSize = (bytes?: number) => {
|
||||
if (!bytes || bytes === 0) return ''
|
||||
const k = 1024
|
||||
const sizes = ['B', 'KB', 'MB', 'GB']
|
||||
const i = Math.floor(Math.log(bytes) / Math.log(k))
|
||||
return `${Math.round((bytes / k ** i) * 10) / 10} ${sizes[i]}`
|
||||
}
|
||||
|
||||
return (
|
||||
<div
|
||||
key={attachment.id}
|
||||
className={`relative overflow-hidden rounded-2xl border border-gray-200 bg-gray-50 dark:border-gray-700 dark:bg-gray-800 ${
|
||||
attachment.dataUrl?.trim() ? 'cursor-pointer' : ''
|
||||
} ${
|
||||
isImage
|
||||
? 'h-16 w-16 md:h-20 md:w-20'
|
||||
: 'flex h-16 min-w-[140px] max-w-[220px] items-center gap-2 px-3 md:h-20 md:min-w-[160px] md:max-w-[240px]'
|
||||
}`}
|
||||
onClick={(e) => {
|
||||
if (attachment.dataUrl?.trim()) {
|
||||
e.preventDefault()
|
||||
window.open(attachment.dataUrl, '_blank')
|
||||
}
|
||||
}}
|
||||
>
|
||||
{isImage ? (
|
||||
<img
|
||||
src={attachment.dataUrl}
|
||||
alt={attachment.name}
|
||||
className='h-full w-full object-cover'
|
||||
/>
|
||||
) : (
|
||||
<>
|
||||
<div className='flex h-10 w-10 flex-shrink-0 items-center justify-center rounded bg-gray-100 md:h-12 md:w-12 dark:bg-gray-700'>
|
||||
{getFileIcon(attachment.type)}
|
||||
</div>
|
||||
<div className='min-w-0 flex-1'>
|
||||
<div className='truncate font-medium text-gray-800 text-xs md:text-sm dark:text-gray-200'>
|
||||
{attachment.name}
|
||||
</div>
|
||||
{attachment.size && (
|
||||
<div className='text-[10px] text-gray-500 md:text-xs dark:text-gray-400'>
|
||||
{formatFileSize(attachment.size)}
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</>
|
||||
)}
|
||||
</div>
|
||||
)
|
||||
})}
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
|
||||
<div className='flex justify-end'>
|
||||
<div className='max-w-[80%] rounded-3xl bg-[#F4F4F4] px-4 py-3 dark:bg-gray-600'>
|
||||
<div className='whitespace-pre-wrap break-words text-base text-gray-800 leading-relaxed dark:text-gray-100'>
|
||||
{isJsonObject ? (
|
||||
<pre>{JSON.stringify(message.content, null, 2)}</pre>
|
||||
) : (
|
||||
<span>{message.content as string}</span>
|
||||
)}
|
||||
</div>
|
||||
{/* Render text content if present and not just file count message */}
|
||||
{message.content && !String(message.content).startsWith('Sent') && (
|
||||
<div className='whitespace-pre-wrap break-words text-base text-gray-800 leading-relaxed dark:text-gray-100'>
|
||||
{isJsonObject ? (
|
||||
<pre>{JSON.stringify(message.content, null, 2)}</pre>
|
||||
) : (
|
||||
<span>{message.content as string}</span>
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@@ -4,8 +4,6 @@ import { useRef, useState } from 'react'
|
||||
import { createLogger } from '@/lib/logs/console/logger'
|
||||
import type { ChatMessage } from '@/app/chat/components/message/message'
|
||||
import { CHAT_ERROR_MESSAGES } from '@/app/chat/constants'
|
||||
// No longer need complex output extraction - backend handles this
|
||||
import type { ExecutionResult } from '@/executor/types'
|
||||
|
||||
const logger = createLogger('UseChatStreaming')
|
||||
|
||||
@@ -148,11 +146,16 @@ export function useChatStreaming() {
|
||||
|
||||
for (const line of lines) {
|
||||
if (line.startsWith('data: ')) {
|
||||
const data = line.substring(6)
|
||||
|
||||
if (data === '[DONE]') {
|
||||
continue
|
||||
}
|
||||
|
||||
try {
|
||||
const json = JSON.parse(line.substring(6))
|
||||
const json = JSON.parse(data)
|
||||
const { blockId, chunk: contentChunk, event: eventType } = json
|
||||
|
||||
// Handle error events from the server
|
||||
if (eventType === 'error' || json.event === 'error') {
|
||||
const errorMessage = json.error || CHAT_ERROR_MESSAGES.GENERIC_ERROR
|
||||
setMessages((prev) =>
|
||||
@@ -172,34 +175,11 @@ export function useChatStreaming() {
|
||||
}
|
||||
|
||||
if (eventType === 'final' && json.data) {
|
||||
// The backend has already processed and combined all outputs
|
||||
// We just need to extract the combined content and use it
|
||||
const result = json.data as ExecutionResult
|
||||
|
||||
// Collect all content from logs that have output.content (backend processed)
|
||||
let combinedContent = ''
|
||||
if (result.logs) {
|
||||
const contentParts: string[] = []
|
||||
|
||||
// Get content from all logs that have processed content
|
||||
result.logs.forEach((log) => {
|
||||
if (log.output?.content && typeof log.output.content === 'string') {
|
||||
// The backend already includes proper separators, so just collect the content
|
||||
contentParts.push(log.output.content)
|
||||
}
|
||||
})
|
||||
|
||||
// Join without additional separators since backend already handles this
|
||||
combinedContent = contentParts.join('')
|
||||
}
|
||||
|
||||
// Update the existing streaming message with the final combined content
|
||||
setMessages((prev) =>
|
||||
prev.map((msg) =>
|
||||
msg.id === messageId
|
||||
? {
|
||||
...msg,
|
||||
content: combinedContent || accumulatedText, // Use combined content or fallback to streamed
|
||||
isStreaming: false,
|
||||
}
|
||||
: msg
|
||||
@@ -210,7 +190,6 @@ export function useChatStreaming() {
|
||||
}
|
||||
|
||||
if (blockId && contentChunk) {
|
||||
// Track that this block has streamed content (like chat panel)
|
||||
if (!messageIdMap.has(blockId)) {
|
||||
messageIdMap.set(blockId, messageId)
|
||||
}
|
||||
|
||||
@@ -698,10 +698,6 @@ export function KnowledgeBase({
|
||||
options={{
|
||||
knowledgeBaseId: id,
|
||||
currentWorkspaceId: knowledgeBase?.workspaceId || null,
|
||||
onWorkspaceChange: () => {
|
||||
// Refresh the page to reflect the workspace change
|
||||
window.location.reload()
|
||||
},
|
||||
onDeleteKnowledgeBase: () => setShowDeleteDialog(true),
|
||||
}}
|
||||
/>
|
||||
|
||||
@@ -11,6 +11,7 @@ import {
|
||||
} from '@/components/ui/dropdown-menu'
|
||||
import { Tooltip, TooltipContent, TooltipTrigger } from '@/components/ui/tooltip'
|
||||
import { createLogger } from '@/lib/logs/console/logger'
|
||||
import { useKnowledgeStore } from '@/stores/knowledge/store'
|
||||
|
||||
const logger = createLogger('WorkspaceSelector')
|
||||
|
||||
@@ -33,6 +34,7 @@ export function WorkspaceSelector({
|
||||
onWorkspaceChange,
|
||||
disabled = false,
|
||||
}: WorkspaceSelectorProps) {
|
||||
const { updateKnowledgeBase } = useKnowledgeStore()
|
||||
const [workspaces, setWorkspaces] = useState<Workspace[]>([])
|
||||
const [isLoading, setIsLoading] = useState(false)
|
||||
const [isUpdating, setIsUpdating] = useState(false)
|
||||
@@ -95,6 +97,11 @@ export function WorkspaceSelector({
|
||||
|
||||
if (result.success) {
|
||||
logger.info(`Knowledge base workspace updated: ${knowledgeBaseId} -> ${workspaceId}`)
|
||||
|
||||
// Update the store immediately to reflect the change without page reload
|
||||
updateKnowledgeBase(knowledgeBaseId, { workspaceId: workspaceId || undefined })
|
||||
|
||||
// Notify parent component of the change
|
||||
onWorkspaceChange?.(workspaceId)
|
||||
} else {
|
||||
throw new Error(result.error || 'Failed to update workspace')
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
'use client'
|
||||
|
||||
import { useState } from 'react'
|
||||
import { Eye, Maximize2, Minimize2, X } from 'lucide-react'
|
||||
import { Maximize2, Minimize2, X } from 'lucide-react'
|
||||
import { Badge } from '@/components/ui/badge'
|
||||
import { Button } from '@/components/ui/button'
|
||||
import { Dialog, DialogContent, DialogHeader, DialogTitle } from '@/components/ui/dialog'
|
||||
@@ -45,7 +45,6 @@ export function FrozenCanvasModal({
|
||||
{/* Header */}
|
||||
<DialogHeader className='flex flex-row items-center justify-between border-b bg-background p-4'>
|
||||
<div className='flex items-center gap-3'>
|
||||
<Eye className='h-5 w-5 text-blue-500 dark:text-blue-400' />
|
||||
<div>
|
||||
<DialogTitle className='font-semibold text-foreground text-lg'>
|
||||
Logged Workflow State
|
||||
@@ -83,14 +82,15 @@ export function FrozenCanvasModal({
|
||||
traceSpans={traceSpans}
|
||||
height='100%'
|
||||
width='100%'
|
||||
// Ensure preview leaves padding at edges so nodes don't touch header
|
||||
/>
|
||||
</div>
|
||||
|
||||
{/* Footer with instructions */}
|
||||
<div className='border-t bg-background px-6 py-3'>
|
||||
<div className='text-muted-foreground text-sm'>
|
||||
💡 Click on blocks to see their input and output data at execution time. This canvas
|
||||
shows the exact state of the workflow when this execution was captured.
|
||||
Click on blocks to see their input and output data at execution time. This canvas shows
|
||||
the exact state of the workflow when this execution was captured.
|
||||
</div>
|
||||
</div>
|
||||
</DialogContent>
|
||||
|
||||
@@ -582,6 +582,8 @@ export function FrozenCanvas({
|
||||
workflowState={data.workflowState}
|
||||
showSubBlocks={true}
|
||||
isPannable={true}
|
||||
defaultZoom={0.8}
|
||||
fitPadding={0.25}
|
||||
onNodeClick={(blockId) => {
|
||||
// Always allow clicking blocks, even if they don't have execution data
|
||||
// This is important for failed workflows where some blocks never executed
|
||||
|
||||
@@ -13,6 +13,67 @@ import {
import { cn, redactApiKeys } from '@/lib/utils'
import type { TraceSpan } from '@/stores/logs/filters/types'

function getSpanKey(span: TraceSpan): string {
if (span.id) {
return span.id
}

const name = span.name || 'span'
const start = span.startTime || 'unknown-start'
const end = span.endTime || 'unknown-end'

return `${name}|${start}|${end}`
}

function mergeTraceSpanChildren(...groups: TraceSpan[][]): TraceSpan[] {
const merged: TraceSpan[] = []
const seen = new Set<string>()

groups.forEach((group) => {
group.forEach((child) => {
const key = getSpanKey(child)
if (seen.has(key)) {
return
}
seen.add(key)
merged.push(child)
})
})

return merged
}

function normalizeChildWorkflowSpan(span: TraceSpan): TraceSpan {
const enrichedSpan: TraceSpan = { ...span }

if (enrichedSpan.output && typeof enrichedSpan.output === 'object') {
enrichedSpan.output = { ...enrichedSpan.output }
}

const normalizedChildren = Array.isArray(span.children)
? span.children.map((childSpan) => normalizeChildWorkflowSpan(childSpan))
: []

const outputChildSpans = Array.isArray(span.output?.childTraceSpans)
? (span.output!.childTraceSpans as TraceSpan[]).map((childSpan) =>
normalizeChildWorkflowSpan(childSpan)
)
: []

const mergedChildren = mergeTraceSpanChildren(normalizedChildren, outputChildSpans)

if (enrichedSpan.output && 'childTraceSpans' in enrichedSpan.output) {
const { childTraceSpans, ...cleanOutput } = enrichedSpan.output as {
childTraceSpans?: TraceSpan[]
} & Record<string, unknown>
enrichedSpan.output = cleanOutput
}

enrichedSpan.children = mergedChildren.length > 0 ? mergedChildren : undefined

return enrichedSpan
}

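// Rough sketch of the normalization above, using invented span values: child spans
// nested under output.childTraceSpans are lifted into children (deduplicated by key)
// and the childTraceSpans key is dropped from output.
const exampleSpan = {
  id: 's1',
  name: 'child workflow',
  startTime: '2024-01-01T00:00:00.000Z',
  endTime: '2024-01-01T00:00:01.000Z',
  output: {
    result: 'ok',
    childTraceSpans: [
      { id: 's2', name: 'agent', startTime: '2024-01-01T00:00:00.100Z', endTime: '2024-01-01T00:00:00.900Z' },
    ],
  },
} as unknown as TraceSpan

const normalizedExample = normalizeChildWorkflowSpan(exampleSpan)
// normalizedExample.children -> [{ id: 's2', ... }]
// 'childTraceSpans' in (normalizedExample.output ?? {}) -> false
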
interface TraceSpansDisplayProps {
|
||||
traceSpans?: TraceSpan[]
|
||||
totalDuration?: number
|
||||
@@ -310,22 +371,23 @@ export function TraceSpansDisplay({
|
||||
</div>
|
||||
<div className='w-full overflow-hidden rounded-md border shadow-sm'>
|
||||
{traceSpans.map((span, index) => {
|
||||
const normalizedSpan = normalizeChildWorkflowSpan(span)
|
||||
const hasSubItems = Boolean(
|
||||
(span.children && span.children.length > 0) ||
|
||||
(span.toolCalls && span.toolCalls.length > 0) ||
|
||||
span.input ||
|
||||
span.output
|
||||
(normalizedSpan.children && normalizedSpan.children.length > 0) ||
|
||||
(normalizedSpan.toolCalls && normalizedSpan.toolCalls.length > 0) ||
|
||||
normalizedSpan.input ||
|
||||
normalizedSpan.output
|
||||
)
|
||||
return (
|
||||
<TraceSpanItem
|
||||
key={index}
|
||||
span={span}
|
||||
span={normalizedSpan}
|
||||
depth={0}
|
||||
totalDuration={
|
||||
actualTotalDuration !== undefined ? actualTotalDuration : totalDuration
|
||||
}
|
||||
isLast={index === traceSpans.length - 1}
|
||||
parentStartTime={new Date(span.startTime).getTime()}
|
||||
parentStartTime={new Date(normalizedSpan.startTime).getTime()}
|
||||
workflowStartTime={workflowStartTime}
|
||||
onToggle={handleSpanToggle}
|
||||
expandedSpans={expandedSpans}
|
||||
@@ -612,17 +674,19 @@ function TraceSpanItem({
|
||||
{hasChildren && (
|
||||
<div>
|
||||
{span.children?.map((childSpan, index) => {
|
||||
const enrichedChildSpan = normalizeChildWorkflowSpan(childSpan)
|
||||
|
||||
const childHasSubItems = Boolean(
|
||||
(childSpan.children && childSpan.children.length > 0) ||
|
||||
(childSpan.toolCalls && childSpan.toolCalls.length > 0) ||
|
||||
childSpan.input ||
|
||||
childSpan.output
|
||||
(enrichedChildSpan.children && enrichedChildSpan.children.length > 0) ||
|
||||
(enrichedChildSpan.toolCalls && enrichedChildSpan.toolCalls.length > 0) ||
|
||||
enrichedChildSpan.input ||
|
||||
enrichedChildSpan.output
|
||||
)
|
||||
|
||||
return (
|
||||
<TraceSpanItem
|
||||
key={index}
|
||||
span={childSpan}
|
||||
span={enrichedChildSpan}
|
||||
depth={depth + 1}
|
||||
totalDuration={totalDuration}
|
||||
isLast={index === (span.children?.length || 0) - 1}
|
||||
|
||||
@@ -413,7 +413,7 @@ export function DeployForm({
|
||||
setKeyType('personal')
|
||||
if (createError) setCreateError(null)
|
||||
}}
|
||||
className='h-8'
|
||||
className='h-8 data-[variant=outline]:border-border data-[variant=outline]:bg-background data-[variant=outline]:text-foreground data-[variant=outline]:hover:bg-muted dark:data-[variant=outline]:border-border dark:data-[variant=outline]:bg-background dark:data-[variant=outline]:text-foreground dark:data-[variant=outline]:hover:bg-muted/80'
|
||||
>
|
||||
Personal
|
||||
</Button>
|
||||
@@ -425,7 +425,7 @@ export function DeployForm({
|
||||
setKeyType('workspace')
|
||||
if (createError) setCreateError(null)
|
||||
}}
|
||||
className='h-8'
|
||||
className='h-8 data-[variant=outline]:border-border data-[variant=outline]:bg-background data-[variant=outline]:text-foreground data-[variant=outline]:hover:bg-muted dark:data-[variant=outline]:border-border dark:data-[variant=outline]:bg-background dark:data-[variant=outline]:text-foreground dark:data-[variant=outline]:hover:bg-muted/80'
|
||||
>
|
||||
Workspace
|
||||
</Button>
|
||||
@@ -452,7 +452,7 @@ export function DeployForm({
|
||||
|
||||
<AlertDialogFooter className='flex'>
|
||||
<AlertDialogCancel
|
||||
className='h-9 w-full rounded-[8px]'
|
||||
className='h-9 w-full rounded-[8px] border-border bg-background text-foreground hover:bg-muted dark:border-border dark:bg-background dark:text-foreground dark:hover:bg-muted/80'
|
||||
onClick={() => {
|
||||
setNewKeyName('')
|
||||
setKeyType('personal')
|
||||
|
||||
@@ -12,16 +12,20 @@ import {
|
||||
} from '@/components/ui/dropdown-menu'
|
||||
import { Label } from '@/components/ui/label'
|
||||
import { getEnv, isTruthy } from '@/lib/env'
|
||||
import { OutputSelect } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/chat/components/output-select/output-select'
|
||||
|
||||
interface ExampleCommandProps {
|
||||
command: string
|
||||
apiKey: string
|
||||
endpoint: string
|
||||
showLabel?: boolean
|
||||
getInputFormatExample?: () => string
|
||||
getInputFormatExample?: (includeStreaming?: boolean) => string
|
||||
workflowId: string | null
|
||||
selectedStreamingOutputs: string[]
|
||||
onSelectedStreamingOutputsChange: (outputs: string[]) => void
|
||||
}
|
||||
|
||||
type ExampleMode = 'sync' | 'async'
|
||||
type ExampleMode = 'sync' | 'async' | 'stream'
|
||||
type ExampleType = 'execute' | 'status' | 'rate-limits'
|
||||
|
||||
export function ExampleCommand({
|
||||
@@ -30,29 +34,27 @@ export function ExampleCommand({
|
||||
endpoint,
|
||||
showLabel = true,
|
||||
getInputFormatExample,
|
||||
workflowId,
|
||||
selectedStreamingOutputs,
|
||||
onSelectedStreamingOutputsChange,
|
||||
}: ExampleCommandProps) {
|
||||
const [mode, setMode] = useState<ExampleMode>('sync')
|
||||
const [exampleType, setExampleType] = useState<ExampleType>('execute')
|
||||
const isAsyncEnabled = isTruthy(getEnv('NEXT_PUBLIC_TRIGGER_DEV_ENABLED'))
|
||||
|
||||
// Format the curl command to use a placeholder for the API key
|
||||
const formatCurlCommand = (command: string, apiKey: string) => {
|
||||
if (!command.includes('curl')) return command
|
||||
|
||||
// Replace the actual API key with a placeholder in the command
|
||||
const sanitizedCommand = command.replace(apiKey, '$SIM_API_KEY')
|
||||
|
||||
// Format the command with line breaks for better readability
|
||||
return sanitizedCommand
|
||||
.replace(' -H ', '\n -H ')
|
||||
.replace(' -d ', '\n -d ')
|
||||
.replace(' http', '\n http')
|
||||
}
|
||||
|
||||
// Get the command with placeholder for copying (single line, no line breaks)
|
||||
const getActualCommand = () => {
|
||||
const displayCommand = getDisplayCommand()
|
||||
// Remove line breaks and extra whitespace for copying
|
||||
return displayCommand
|
||||
.replace(/\\\n\s*/g, ' ') // Remove backslash + newline + whitespace
|
||||
.replace(/\n\s*/g, ' ') // Remove any remaining newlines + whitespace
|
||||
@@ -63,32 +65,56 @@
const getDisplayCommand = () => {
const baseEndpoint = endpoint.replace(apiKey, '$SIM_API_KEY')
const inputExample = getInputFormatExample
? getInputFormatExample()
? getInputFormatExample(false)
: ' -d \'{"input": "your data here"}\''

const addStreamingParams = (dashD: string) => {
const match = dashD.match(/-d\s*'([\s\S]*)'/)
if (!match) {
const payload: Record<string, any> = { stream: true }
if (selectedStreamingOutputs && selectedStreamingOutputs.length > 0) {
payload.selectedOutputs = selectedStreamingOutputs
}
return ` -d '${JSON.stringify(payload)}'`
}
try {
const payload = JSON.parse(match[1]) as Record<string, any>
payload.stream = true
if (selectedStreamingOutputs && selectedStreamingOutputs.length > 0) {
payload.selectedOutputs = selectedStreamingOutputs
}
return ` -d '${JSON.stringify(payload)}'`
} catch {
return dashD
}
}

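// Illustrative behavior of addStreamingParams (values invented): given
//   ` -d '{"input":"hi"}'` with selectedStreamingOutputs = ['agent1.content'],
// it returns
//   ` -d '{"input":"hi","stream":true,"selectedOutputs":["agent1.content"]}'`.
// If no -d payload can be matched it emits just the streaming fields; if the
// matched payload fails to parse, the original -d string is returned unchanged.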
switch (mode) {
|
||||
case 'sync':
|
||||
if (getInputFormatExample) {
|
||||
const syncInputExample = getInputFormatExample(false)
|
||||
return `curl -X POST \\\n -H "X-API-Key: $SIM_API_KEY" \\\n -H "Content-Type: application/json"${syncInputExample} \\\n ${baseEndpoint}`
|
||||
}
|
||||
return formatCurlCommand(command, apiKey)
|
||||
|
||||
case 'stream': {
|
||||
const streamDashD = addStreamingParams(inputExample)
|
||||
return `curl -X POST \\\n -H "X-API-Key: $SIM_API_KEY" \\\n -H "Content-Type: application/json"${streamDashD} \\\n ${baseEndpoint}`
|
||||
}
|
||||
|
||||
case 'async':
|
||||
switch (exampleType) {
|
||||
case 'execute':
|
||||
return `curl -X POST \\
|
||||
-H "X-API-Key: $SIM_API_KEY" \\
|
||||
-H "Content-Type: application/json" \\
|
||||
-H "X-Execution-Mode: async"${inputExample} \\
|
||||
${baseEndpoint}`
|
||||
return `curl -X POST \\\n -H "X-API-Key: $SIM_API_KEY" \\\n -H "Content-Type: application/json" \\\n -H "X-Execution-Mode: async"${inputExample} \\\n ${baseEndpoint}`
|
||||
|
||||
case 'status': {
|
||||
const baseUrl = baseEndpoint.split('/api/workflows/')[0]
|
||||
return `curl -H "X-API-Key: $SIM_API_KEY" \\
|
||||
${baseUrl}/api/jobs/JOB_ID_FROM_EXECUTION`
|
||||
return `curl -H "X-API-Key: $SIM_API_KEY" \\\n ${baseUrl}/api/jobs/JOB_ID_FROM_EXECUTION`
|
||||
}
|
||||
|
||||
case 'rate-limits': {
|
||||
const baseUrlForRateLimit = baseEndpoint.split('/api/workflows/')[0]
|
||||
return `curl -H "X-API-Key: $SIM_API_KEY" \\
|
||||
${baseUrlForRateLimit}/api/users/me/usage-limits`
|
||||
return `curl -H "X-API-Key: $SIM_API_KEY" \\\n ${baseUrlForRateLimit}/api/users/me/usage-limits`
|
||||
}
|
||||
|
||||
default:
|
||||
@@ -114,10 +140,11 @@ export function ExampleCommand({
|
||||
}
|
||||
|
||||
return (
|
||||
<div className='space-y-1.5'>
|
||||
<div className='flex items-center justify-between'>
|
||||
{showLabel && <Label className='font-medium text-sm'>Example</Label>}
|
||||
{isAsyncEnabled && (
|
||||
<div className='space-y-4'>
|
||||
{/* Example Command */}
|
||||
<div className='space-y-1.5'>
|
||||
<div className='flex items-center justify-between'>
|
||||
{showLabel && <Label className='font-medium text-sm'>Example</Label>}
|
||||
<div className='flex items-center gap-1'>
|
||||
<Button
|
||||
variant='outline'
|
||||
@@ -134,57 +161,85 @@ export function ExampleCommand({
|
||||
<Button
|
||||
variant='outline'
|
||||
size='sm'
|
||||
onClick={() => setMode('async')}
|
||||
onClick={() => setMode('stream')}
|
||||
className={`h-6 min-w-[50px] px-2 py-1 text-xs transition-none ${
|
||||
mode === 'async'
|
||||
mode === 'stream'
|
||||
? 'border-primary bg-primary text-primary-foreground hover:border-primary hover:bg-primary hover:text-primary-foreground'
|
||||
: ''
|
||||
}`}
|
||||
>
|
||||
Async
|
||||
Stream
|
||||
</Button>
|
||||
<DropdownMenu>
|
||||
<DropdownMenuTrigger asChild>
|
||||
{isAsyncEnabled && (
|
||||
<>
|
||||
<Button
|
||||
variant='outline'
|
||||
size='sm'
|
||||
className='h-6 min-w-[140px] justify-between px-2 py-1 text-xs'
|
||||
disabled={mode === 'sync'}
|
||||
onClick={() => setMode('async')}
|
||||
className={`h-6 min-w-[50px] px-2 py-1 text-xs transition-none ${
|
||||
mode === 'async'
|
||||
? 'border-primary bg-primary text-primary-foreground hover:border-primary hover:bg-primary hover:text-primary-foreground'
|
||||
: ''
|
||||
}`}
|
||||
>
|
||||
<span className='truncate'>{getExampleTitle()}</span>
|
||||
<ChevronDown className='ml-1 h-3 w-3 flex-shrink-0' />
|
||||
Async
|
||||
</Button>
|
||||
</DropdownMenuTrigger>
|
||||
<DropdownMenuContent align='end'>
|
||||
<DropdownMenuItem
|
||||
className='cursor-pointer'
|
||||
onClick={() => setExampleType('execute')}
|
||||
>
|
||||
Async Execution
|
||||
</DropdownMenuItem>
|
||||
<DropdownMenuItem
|
||||
className='cursor-pointer'
|
||||
onClick={() => setExampleType('status')}
|
||||
>
|
||||
Check Job Status
|
||||
</DropdownMenuItem>
|
||||
<DropdownMenuItem
|
||||
className='cursor-pointer'
|
||||
onClick={() => setExampleType('rate-limits')}
|
||||
>
|
||||
Rate Limits & Usage
|
||||
</DropdownMenuItem>
|
||||
</DropdownMenuContent>
|
||||
</DropdownMenu>
|
||||
<DropdownMenu>
|
||||
<DropdownMenuTrigger asChild>
|
||||
<Button
|
||||
variant='outline'
|
||||
size='sm'
|
||||
className='h-6 min-w-[140px] justify-between px-2 py-1 text-xs'
|
||||
disabled={mode === 'sync' || mode === 'stream'}
|
||||
>
|
||||
<span className='truncate'>{getExampleTitle()}</span>
|
||||
<ChevronDown className='ml-1 h-3 w-3 flex-shrink-0' />
|
||||
</Button>
|
||||
</DropdownMenuTrigger>
|
||||
<DropdownMenuContent align='end'>
|
||||
<DropdownMenuItem
|
||||
className='cursor-pointer'
|
||||
onClick={() => setExampleType('execute')}
|
||||
>
|
||||
Async Execution
|
||||
</DropdownMenuItem>
|
||||
<DropdownMenuItem
|
||||
className='cursor-pointer'
|
||||
onClick={() => setExampleType('status')}
|
||||
>
|
||||
Check Job Status
|
||||
</DropdownMenuItem>
|
||||
<DropdownMenuItem
|
||||
className='cursor-pointer'
|
||||
onClick={() => setExampleType('rate-limits')}
|
||||
>
|
||||
Rate Limits & Usage
|
||||
</DropdownMenuItem>
|
||||
</DropdownMenuContent>
|
||||
</DropdownMenu>
|
||||
</>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Output selector for Stream mode */}
|
||||
{mode === 'stream' && (
|
||||
<div className='space-y-2'>
|
||||
<div className='text-muted-foreground text-xs'>Select outputs to stream</div>
|
||||
<OutputSelect
|
||||
workflowId={workflowId}
|
||||
selectedOutputs={selectedStreamingOutputs}
|
||||
onOutputSelect={onSelectedStreamingOutputsChange}
|
||||
placeholder='Select outputs for streaming'
|
||||
valueMode='label'
|
||||
/>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
|
||||
<div className='group relative h-[120px] rounded-md border bg-background transition-colors hover:bg-muted/50'>
|
||||
<pre className='h-full overflow-auto whitespace-pre-wrap p-3 font-mono text-xs'>
|
||||
{getDisplayCommand()}
|
||||
</pre>
|
||||
<CopyButton text={getActualCommand()} />
|
||||
<div className='group relative overflow-x-auto rounded-md border bg-background transition-colors hover:bg-muted/50'>
|
||||
<pre className='whitespace-pre p-3 font-mono text-xs'>{getDisplayCommand()}</pre>
|
||||
<CopyButton text={getActualCommand()} />
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
|
||||
@@ -43,7 +43,9 @@ interface DeploymentInfoProps {
|
||||
workflowId: string | null
|
||||
deployedState: WorkflowState
|
||||
isLoadingDeployedState: boolean
|
||||
getInputFormatExample?: () => string
|
||||
getInputFormatExample?: (includeStreaming?: boolean) => string
|
||||
selectedStreamingOutputs: string[]
|
||||
onSelectedStreamingOutputsChange: (outputs: string[]) => void
|
||||
}
|
||||
|
||||
export function DeploymentInfo({
|
||||
@@ -57,6 +59,8 @@ export function DeploymentInfo({
|
||||
deployedState,
|
||||
isLoadingDeployedState,
|
||||
getInputFormatExample,
|
||||
selectedStreamingOutputs,
|
||||
onSelectedStreamingOutputsChange,
|
||||
}: DeploymentInfoProps) {
|
||||
const [isViewingDeployed, setIsViewingDeployed] = useState(false)
|
||||
|
||||
@@ -116,6 +120,9 @@ export function DeploymentInfo({
|
||||
apiKey={deploymentInfo.apiKey}
|
||||
endpoint={deploymentInfo.endpoint}
|
||||
getInputFormatExample={getInputFormatExample}
|
||||
workflowId={workflowId}
|
||||
selectedStreamingOutputs={selectedStreamingOutputs}
|
||||
onSelectedStreamingOutputsChange={onSelectedStreamingOutputsChange}
|
||||
/>
|
||||
</div>
|
||||
|
||||
|
||||
@@ -64,7 +64,7 @@ interface DeployFormValues {
|
||||
newKeyName?: string
|
||||
}
|
||||
|
||||
type TabView = 'general' | 'api' | 'chat'
|
||||
type TabView = 'general' | 'api' | 'versions' | 'chat'
|
||||
|
||||
export function DeployModal({
|
||||
open,
|
||||
@@ -92,6 +92,7 @@ export function DeployModal({
|
||||
const [apiDeployError, setApiDeployError] = useState<string | null>(null)
|
||||
const [chatExists, setChatExists] = useState(false)
|
||||
const [isChatFormValid, setIsChatFormValid] = useState(false)
|
||||
const [selectedStreamingOutputs, setSelectedStreamingOutputs] = useState<string[]>([])
|
||||
|
||||
const [versions, setVersions] = useState<WorkflowDeploymentVersionResponse[]>([])
|
||||
const [versionsLoading, setVersionsLoading] = useState(false)
|
||||
@@ -102,7 +103,7 @@ export function DeployModal({
|
||||
const [currentPage, setCurrentPage] = useState(1)
|
||||
const itemsPerPage = 5
|
||||
|
||||
const getInputFormatExample = () => {
|
||||
const getInputFormatExample = (includeStreaming = false) => {
|
||||
let inputFormatExample = ''
|
||||
try {
|
||||
const blocks = Object.values(useWorkflowStore.getState().blocks)
|
||||
@@ -117,8 +118,9 @@ export function DeployModal({
|
||||
if (targetBlock) {
|
||||
const inputFormat = useSubBlockStore.getState().getValue(targetBlock.id, 'inputFormat')
|
||||
|
||||
const exampleData: Record<string, any> = {}
|
||||
|
||||
if (inputFormat && Array.isArray(inputFormat) && inputFormat.length > 0) {
|
||||
const exampleData: Record<string, any> = {}
|
||||
inputFormat.forEach((field: any) => {
|
||||
if (field.name) {
|
||||
switch (field.type) {
|
||||
@@ -137,10 +139,53 @@
case 'array':
exampleData[field.name] = [1, 2, 3]
break
case 'files':
exampleData[field.name] = [
{
data: 'data:application/pdf;base64,...',
type: 'file',
name: 'document.pdf',
mime: 'application/pdf',
},
]
break
}
}
})
}

// Add streaming parameters if enabled and outputs are selected
if (includeStreaming && selectedStreamingOutputs.length > 0) {
exampleData.stream = true
// Convert blockId_attribute format to blockName.attribute format for display
const UUID_REGEX = /^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/i

const convertedOutputs = selectedStreamingOutputs.map((outputId) => {
// If it starts with a UUID, convert to blockName.attribute format
if (UUID_REGEX.test(outputId)) {
const underscoreIndex = outputId.indexOf('_')
if (underscoreIndex === -1) return outputId

const blockId = outputId.substring(0, underscoreIndex)
const attribute = outputId.substring(underscoreIndex + 1)

// Find the block by ID and get its name
const block = blocks.find((b) => b.id === blockId)
if (block?.name) {
// Normalize block name: lowercase and remove spaces
const normalizedBlockName = block.name.toLowerCase().replace(/\s+/g, '')
return `${normalizedBlockName}.${attribute}`
}
}

// Already in blockName.attribute format or couldn't convert
return outputId
})

exampleData.selectedOutputs = convertedOutputs
}

if (Object.keys(exampleData).length > 0) {
inputFormatExample = ` -d '${JSON.stringify(exampleData)}'`
}
}
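For illustration, assuming an invented block named "Agent 1" whose selected output is stored as `<uuid>_content`, the conversion above yields an example payload like this:

// Sketch of the resulting example payload when streaming outputs are selected
// (field values invented).
const exampleStreamingPayload = {
  input: 'Hello world',
  stream: true,
  selectedOutputs: ['agent1.content'],
}
// Rendered into the curl example as:
// -d '{"input":"Hello world","stream":true,"selectedOutputs":["agent1.content"]}'
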
@@ -199,7 +244,7 @@ export function DeployModal({
|
||||
setIsLoading(true)
|
||||
fetchApiKeys()
|
||||
fetchChatDeploymentInfo()
|
||||
setActiveTab('general')
|
||||
setActiveTab('api')
|
||||
}
|
||||
}, [open, workflowId])
|
||||
|
||||
@@ -231,7 +276,7 @@ export function DeployModal({
|
||||
|
||||
const data = await response.json()
|
||||
const endpoint = `${getEnv('NEXT_PUBLIC_APP_URL')}/api/workflows/${workflowId}/execute`
|
||||
const inputFormatExample = getInputFormatExample()
|
||||
const inputFormatExample = getInputFormatExample(selectedStreamingOutputs.length > 0) // Include streaming params only if outputs selected
|
||||
|
||||
setDeploymentInfo({
|
||||
isDeployed: data.isDeployed,
|
||||
@@ -287,7 +332,7 @@ export function DeployModal({
|
||||
useWorkflowRegistry.getState().setWorkflowNeedsRedeployment(workflowId, false)
|
||||
}
|
||||
const endpoint = `${getEnv('NEXT_PUBLIC_APP_URL')}/api/workflows/${workflowId}/execute`
|
||||
const inputFormatExample = getInputFormatExample()
|
||||
const inputFormatExample = getInputFormatExample(selectedStreamingOutputs.length > 0) // Include streaming params only if outputs selected
|
||||
|
||||
const newDeploymentInfo = {
|
||||
isDeployed: true,
|
||||
@@ -494,7 +539,7 @@ export function DeployModal({
|
||||
return (
|
||||
<Dialog open={open} onOpenChange={handleCloseModal}>
|
||||
<DialogContent
|
||||
className='flex max-h-[78vh] flex-col gap-0 overflow-hidden p-0 sm:max-w-[600px]'
|
||||
className='flex max-h-[90vh] flex-col gap-0 overflow-hidden p-0 sm:max-w-[600px]'
|
||||
hideCloseButton
|
||||
>
|
||||
<DialogHeader className='flex-shrink-0 border-b px-6 py-4'>
|
||||
@@ -510,16 +555,6 @@ export function DeployModal({
|
||||
<div className='flex flex-1 flex-col overflow-hidden'>
|
||||
<div className='flex h-14 flex-none items-center border-b px-6'>
|
||||
<div className='flex gap-2'>
|
||||
<button
|
||||
onClick={() => setActiveTab('general')}
|
||||
className={`rounded-md px-3 py-1 text-sm transition-colors ${
|
||||
activeTab === 'general'
|
||||
? 'bg-accent text-foreground'
|
||||
: 'text-muted-foreground hover:bg-accent/50 hover:text-foreground'
|
||||
}`}
|
||||
>
|
||||
General
|
||||
</button>
|
||||
<button
|
||||
onClick={() => setActiveTab('api')}
|
||||
className={`rounded-md px-3 py-1 text-sm transition-colors ${
|
||||
@@ -530,6 +565,16 @@ export function DeployModal({
|
||||
>
|
||||
API
|
||||
</button>
|
||||
<button
|
||||
onClick={() => setActiveTab('versions')}
|
||||
className={`rounded-md px-3 py-1 text-sm transition-colors ${
|
||||
activeTab === 'versions'
|
||||
? 'bg-accent text-foreground'
|
||||
: 'text-muted-foreground hover:bg-accent/50 hover:text-foreground'
|
||||
}`}
|
||||
>
|
||||
Versions
|
||||
</button>
|
||||
<button
|
||||
onClick={() => setActiveTab('chat')}
|
||||
className={`rounded-md px-3 py-1 text-sm transition-colors ${
|
||||
@@ -545,175 +590,6 @@ export function DeployModal({
|
||||
|
||||
<div className='flex-1 overflow-y-auto'>
|
||||
<div className='p-6'>
|
||||
{activeTab === 'general' && (
|
||||
<>
|
||||
{isDeployed ? (
|
||||
<DeploymentInfo
|
||||
isLoading={isLoading}
|
||||
deploymentInfo={
|
||||
deploymentInfo ? { ...deploymentInfo, needsRedeployment } : null
|
||||
}
|
||||
onRedeploy={handleRedeploy}
|
||||
onUndeploy={handleUndeploy}
|
||||
isSubmitting={isSubmitting}
|
||||
isUndeploying={isUndeploying}
|
||||
workflowId={workflowId}
|
||||
deployedState={deployedState}
|
||||
isLoadingDeployedState={isLoadingDeployedState}
|
||||
getInputFormatExample={getInputFormatExample}
|
||||
/>
|
||||
) : (
|
||||
<>
|
||||
{apiDeployError && (
|
||||
<div className='mb-4 rounded-md border border-destructive/30 bg-destructive/10 p-3 text-destructive text-sm'>
|
||||
<div className='font-semibold'>API Deployment Error</div>
|
||||
<div>{apiDeployError}</div>
|
||||
</div>
|
||||
)}
|
||||
<div className='-mx-1 px-1'>
|
||||
<DeployForm
|
||||
apiKeys={apiKeys}
|
||||
keysLoaded={keysLoaded}
|
||||
onSubmit={onDeploy}
|
||||
onApiKeyCreated={fetchApiKeys}
|
||||
formId='deploy-api-form-general'
|
||||
/>
|
||||
</div>
|
||||
</>
|
||||
)}
|
||||
|
||||
<div className='mt-6'>
|
||||
<div className='mb-3 font-medium text-sm'>Deployment Versions</div>
|
||||
{versionsLoading ? (
|
||||
<div className='rounded-md border p-4 text-center text-muted-foreground text-sm'>
|
||||
Loading deployments...
|
||||
</div>
|
||||
) : versions.length === 0 ? (
|
||||
<div className='rounded-md border p-4 text-center text-muted-foreground text-sm'>
|
||||
No deployments yet
|
||||
</div>
|
||||
) : (
|
||||
<>
|
||||
<div className='overflow-hidden rounded-md border'>
|
||||
<table className='w-full'>
|
||||
<thead className='border-b bg-muted/50'>
|
||||
<tr>
|
||||
<th className='w-10' />
|
||||
<th className='px-4 py-2 text-left font-medium text-muted-foreground text-xs'>
|
||||
Version
|
||||
</th>
|
||||
<th className='px-4 py-2 text-left font-medium text-muted-foreground text-xs'>
|
||||
Deployed By
|
||||
</th>
|
||||
<th className='px-4 py-2 text-left font-medium text-muted-foreground text-xs'>
|
||||
Created
|
||||
</th>
|
||||
<th className='w-10' />
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody className='divide-y'>
|
||||
{versions
|
||||
.slice((currentPage - 1) * itemsPerPage, currentPage * itemsPerPage)
|
||||
.map((v) => (
|
||||
<tr
|
||||
key={v.id}
|
||||
className='cursor-pointer transition-colors hover:bg-muted/30'
|
||||
onClick={() => openVersionPreview(v.version)}
|
||||
>
|
||||
<td className='px-4 py-2.5'>
|
||||
<div
|
||||
className={`h-2 w-2 rounded-full ${
|
||||
v.isActive ? 'bg-green-500' : 'bg-muted-foreground/40'
|
||||
}`}
|
||||
title={v.isActive ? 'Active' : 'Inactive'}
|
||||
/>
|
||||
</td>
|
||||
<td className='px-4 py-2.5'>
|
||||
<span className='font-medium text-sm'>v{v.version}</span>
|
||||
</td>
|
||||
<td className='px-4 py-2.5'>
|
||||
<span className='text-muted-foreground text-sm'>
|
||||
{v.deployedBy || 'Unknown'}
|
||||
</span>
|
||||
</td>
|
||||
<td className='px-4 py-2.5'>
|
||||
<span className='text-muted-foreground text-sm'>
|
||||
{new Date(v.createdAt).toLocaleDateString()}{' '}
|
||||
{new Date(v.createdAt).toLocaleTimeString()}
|
||||
</span>
|
||||
</td>
|
||||
<td
|
||||
className='px-4 py-2.5'
|
||||
onClick={(e) => e.stopPropagation()}
|
||||
>
|
||||
<DropdownMenu>
|
||||
<DropdownMenuTrigger asChild>
|
||||
<Button
|
||||
variant='ghost'
|
||||
size='icon'
|
||||
className='h-8 w-8'
|
||||
disabled={activatingVersion === v.version}
|
||||
>
|
||||
<MoreVertical className='h-4 w-4' />
|
||||
</Button>
|
||||
</DropdownMenuTrigger>
|
||||
<DropdownMenuContent align='end'>
|
||||
<DropdownMenuItem
|
||||
onClick={() => activateVersion(v.version)}
|
||||
disabled={v.isActive || activatingVersion === v.version}
|
||||
>
|
||||
{v.isActive
|
||||
? 'Active'
|
||||
: activatingVersion === v.version
|
||||
? 'Activating...'
|
||||
: 'Activate'}
|
||||
</DropdownMenuItem>
|
||||
<DropdownMenuItem
|
||||
onClick={() => openVersionPreview(v.version)}
|
||||
>
|
||||
Inspect
|
||||
</DropdownMenuItem>
|
||||
</DropdownMenuContent>
|
||||
</DropdownMenu>
|
||||
</td>
|
||||
</tr>
|
||||
))}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
{versions.length > itemsPerPage && (
|
||||
<div className='mt-3 flex items-center justify-between'>
|
||||
<span className='text-muted-foreground text-sm'>
|
||||
Showing{' '}
|
||||
{Math.min((currentPage - 1) * itemsPerPage + 1, versions.length)} -{' '}
|
||||
{Math.min(currentPage * itemsPerPage, versions.length)} of{' '}
|
||||
{versions.length}
|
||||
</span>
|
||||
<div className='flex gap-2'>
|
||||
<Button
|
||||
variant='outline'
|
||||
size='sm'
|
||||
onClick={() => setCurrentPage(currentPage - 1)}
|
||||
disabled={currentPage === 1}
|
||||
>
|
||||
Previous
|
||||
</Button>
|
||||
<Button
|
||||
variant='outline'
|
||||
size='sm'
|
||||
onClick={() => setCurrentPage(currentPage + 1)}
|
||||
disabled={currentPage * itemsPerPage >= versions.length}
|
||||
>
|
||||
Next
|
||||
</Button>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
</>
|
||||
)}
|
||||
</div>
|
||||
</>
|
||||
)}
|
||||
{activeTab === 'api' && (
|
||||
<>
|
||||
{isDeployed ? (
|
||||
@@ -730,6 +606,8 @@ export function DeployModal({
deployedState={deployedState}
isLoadingDeployedState={isLoadingDeployedState}
getInputFormatExample={getInputFormatExample}
selectedStreamingOutputs={selectedStreamingOutputs}
onSelectedStreamingOutputsChange={setSelectedStreamingOutputs}
/>
) : (
<>
@@ -739,6 +617,7 @@ export function DeployModal({
<div>{apiDeployError}</div>
</div>
)}

<div className='-mx-1 px-1'>
<DeployForm
apiKeys={apiKeys}
@@ -753,6 +632,136 @@ export function DeployModal({
</>
)}

{activeTab === 'versions' && (
|
||||
<>
|
||||
<div className='mb-3 font-medium text-sm'>Deployment Versions</div>
|
||||
{versionsLoading ? (
|
||||
<div className='rounded-md border p-4 text-center text-muted-foreground text-sm'>
|
||||
Loading deployments...
|
||||
</div>
|
||||
) : versions.length === 0 ? (
|
||||
<div className='rounded-md border p-4 text-center text-muted-foreground text-sm'>
|
||||
No deployments yet
|
||||
</div>
|
||||
) : (
|
||||
<>
|
||||
<div className='overflow-hidden rounded-md border'>
|
||||
<table className='w-full'>
|
||||
<thead className='border-b bg-muted/50'>
|
||||
<tr>
|
||||
<th className='w-10' />
|
||||
<th className='px-4 py-2 text-left font-medium text-muted-foreground text-xs'>
|
||||
Version
|
||||
</th>
|
||||
<th className='px-4 py-2 text-left font-medium text-muted-foreground text-xs'>
|
||||
Deployed By
|
||||
</th>
|
||||
<th className='px-4 py-2 text-left font-medium text-muted-foreground text-xs'>
|
||||
Created
|
||||
</th>
|
||||
<th className='w-10' />
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody className='divide-y'>
|
||||
{versions
|
||||
.slice((currentPage - 1) * itemsPerPage, currentPage * itemsPerPage)
|
||||
.map((v) => (
|
||||
<tr
|
||||
key={v.id}
|
||||
className='cursor-pointer transition-colors hover:bg-muted/30'
|
||||
onClick={() => openVersionPreview(v.version)}
|
||||
>
|
||||
<td className='px-4 py-2.5'>
|
||||
<div
|
||||
className={`h-2 w-2 rounded-full ${
|
||||
v.isActive ? 'bg-green-500' : 'bg-muted-foreground/40'
|
||||
}`}
|
||||
title={v.isActive ? 'Active' : 'Inactive'}
|
||||
/>
|
||||
</td>
|
||||
<td className='px-4 py-2.5'>
|
||||
<span className='font-medium text-sm'>v{v.version}</span>
|
||||
</td>
|
||||
<td className='px-4 py-2.5'>
|
||||
<span className='text-muted-foreground text-sm'>
|
||||
{v.deployedBy || 'Unknown'}
|
||||
</span>
|
||||
</td>
|
||||
<td className='px-4 py-2.5'>
|
||||
<span className='text-muted-foreground text-sm'>
|
||||
{new Date(v.createdAt).toLocaleDateString()}{' '}
|
||||
{new Date(v.createdAt).toLocaleTimeString()}
|
||||
</span>
|
||||
</td>
|
||||
<td className='px-4 py-2.5' onClick={(e) => e.stopPropagation()}>
|
||||
<DropdownMenu>
|
||||
<DropdownMenuTrigger asChild>
|
||||
<Button
|
||||
variant='ghost'
|
||||
size='icon'
|
||||
className='h-8 w-8'
|
||||
disabled={activatingVersion === v.version}
|
||||
>
|
||||
<MoreVertical className='h-4 w-4' />
|
||||
</Button>
|
||||
</DropdownMenuTrigger>
|
||||
<DropdownMenuContent align='end'>
|
||||
<DropdownMenuItem
|
||||
onClick={() => activateVersion(v.version)}
|
||||
disabled={v.isActive || activatingVersion === v.version}
|
||||
>
|
||||
{v.isActive
|
||||
? 'Active'
|
||||
: activatingVersion === v.version
|
||||
? 'Activating...'
|
||||
: 'Activate'}
|
||||
</DropdownMenuItem>
|
||||
<DropdownMenuItem
|
||||
onClick={() => openVersionPreview(v.version)}
|
||||
>
|
||||
Inspect
|
||||
</DropdownMenuItem>
|
||||
</DropdownMenuContent>
|
||||
</DropdownMenu>
|
||||
</td>
|
||||
</tr>
|
||||
))}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
{versions.length > itemsPerPage && (
|
||||
<div className='mt-3 flex items-center justify-between'>
|
||||
<span className='text-muted-foreground text-sm'>
|
||||
Showing{' '}
|
||||
{Math.min((currentPage - 1) * itemsPerPage + 1, versions.length)} -{' '}
|
||||
{Math.min(currentPage * itemsPerPage, versions.length)} of{' '}
|
||||
{versions.length}
|
||||
</span>
|
||||
<div className='flex gap-2'>
|
||||
<Button
|
||||
variant='outline'
|
||||
size='sm'
|
||||
onClick={() => setCurrentPage(currentPage - 1)}
|
||||
disabled={currentPage === 1}
|
||||
>
|
||||
Previous
|
||||
</Button>
|
||||
<Button
|
||||
variant='outline'
|
||||
size='sm'
|
||||
onClick={() => setCurrentPage(currentPage + 1)}
|
||||
disabled={currentPage * itemsPerPage >= versions.length}
|
||||
>
|
||||
Next
|
||||
</Button>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
</>
|
||||
)}
|
||||
</>
|
||||
)}
|
||||
|
||||
{activeTab === 'chat' && (
|
||||
<ChatDeploy
|
||||
workflowId={workflowId || ''}
|
||||
@@ -776,36 +785,6 @@ export function DeployModal({
</div>
</div>

{activeTab === 'general' && !isDeployed && (
<div className='flex flex-shrink-0 justify-between border-t px-6 py-4'>
<Button variant='outline' onClick={handleCloseModal}>
Cancel
</Button>

<Button
type='submit'
form='deploy-api-form-general'
disabled={isSubmitting || (!keysLoaded && !apiKeys.length)}
className={cn(
'gap-2 font-medium',
'bg-[var(--brand-primary-hover-hex)] hover:bg-[var(--brand-primary-hover-hex)]',
'shadow-[0_0_0_0_var(--brand-primary-hover-hex)] hover:shadow-[0_0_0_4px_rgba(127,47,255,0.15)]',
'text-white transition-all duration-200',
'disabled:opacity-50 disabled:hover:bg-[var(--brand-primary-hover-hex)] disabled:hover:shadow-none'
)}
>
{isSubmitting ? (
<>
<Loader2 className='mr-1.5 h-3.5 w-3.5 animate-spin' />
Deploying...
</>
) : (
'Deploy'
)}
</Button>
</div>
)}

{activeTab === 'api' && !isDeployed && (
<div className='flex flex-shrink-0 justify-between border-t px-6 py-4'>
<Button variant='outline' onClick={handleCloseModal}>

@@ -56,7 +56,8 @@ export function DeployedWorkflowCard({
<div className='flex items-center justify-between'>
<h3 className='font-medium'>Workflow Preview</h3>
<div className='flex items-center gap-2'>
{hasCurrent && (
{/* Show Current only when no explicit version is selected */}
{hasCurrent && !hasSelected && (
<button
type='button'
className={cn(
@@ -68,6 +69,7 @@ export function DeployedWorkflowCard({
Current
</button>
)}
{/* Always show Active Deployed */}
{hasActive && (
<button
type='button'
@@ -80,6 +82,7 @@ export function DeployedWorkflowCard({
Active Deployed
</button>
)}
{/* If a specific version is selected, show its label */}
{hasSelected && (
<button
type='button'
@@ -109,7 +112,7 @@ export function DeployedWorkflowCard({
width='100%'
isPannable={true}
defaultPosition={{ x: 0, y: 0 }}
defaultZoom={1}
defaultZoom={0.8}
/>
</div>
</CardContent>

@@ -106,17 +106,22 @@ export function DeployedWorkflowModal({
selectedVersionLabel={selectedVersionLabel}
/>

<div className='mt-6 flex justify-between'>
<div className='mt-1 flex justify-between'>
<div className='flex items-center gap-2'>
{onActivateVersion && (
<Button
onClick={onActivateVersion}
disabled={isSelectedVersionActive || !!isActivating}
variant={isSelectedVersionActive ? 'secondary' : 'default'}
>
{isSelectedVersionActive ? 'Active' : isActivating ? 'Activating…' : 'Activate'}
</Button>
)}
{onActivateVersion &&
(isSelectedVersionActive ? (
<div className='inline-flex items-center gap-2 rounded-md bg-emerald-500/10 px-2.5 py-1 font-medium text-emerald-600 text-xs dark:text-emerald-400'>
<span className='relative flex h-2 w-2 items-center justify-center'>
<span className='absolute inline-flex h-full w-full animate-ping rounded-full bg-emerald-500 opacity-75' />
<span className='relative inline-flex h-2 w-2 rounded-full bg-emerald-500' />
</span>
Active
</div>
) : (
<Button onClick={onActivateVersion} disabled={!!isActivating}>
{isActivating ? 'Activating…' : 'Activate'}
</Button>
))}
</div>

<div className='flex items-center gap-2'>

@@ -1,10 +1,9 @@
'use client'

import { type KeyboardEvent, useCallback, useEffect, useMemo, useRef, useState } from 'react'
import { ArrowDown, ArrowUp } from 'lucide-react'
import { AlertCircle, ArrowDown, ArrowUp, File, FileText, Image, Paperclip, X } from 'lucide-react'
import { Button } from '@/components/ui/button'
import { Input } from '@/components/ui/input'
import { Notice } from '@/components/ui/notice'
import { ScrollArea } from '@/components/ui/scroll-area'
import { createLogger } from '@/lib/logs/console/logger'
import {
@@ -13,7 +12,6 @@ import {
parseOutputContentSafely,
} from '@/lib/response-format'
import {
ChatFileUpload,
ChatMessage,
OutputSelect,
} from '@/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/chat/components'
@@ -261,12 +259,40 @@ export function Chat({ chatMessage, setChatMessage }: ChatProps) {
|
||||
let result: any = null
|
||||
|
||||
try {
|
||||
// Add user message
|
||||
// Read files as data URLs for display in chat (only images to avoid localStorage quota issues)
|
||||
const attachmentsWithData = await Promise.all(
|
||||
chatFiles.map(async (file) => {
|
||||
let dataUrl = ''
|
||||
// Only read images as data URLs to avoid storing large files in localStorage
|
||||
if (file.type.startsWith('image/')) {
|
||||
try {
|
||||
dataUrl = await new Promise<string>((resolve, reject) => {
|
||||
const reader = new FileReader()
|
||||
reader.onload = () => resolve(reader.result as string)
|
||||
reader.onerror = reject
|
||||
reader.readAsDataURL(file.file)
|
||||
})
|
||||
} catch (error) {
|
||||
logger.error('Error reading file as data URL:', error)
|
||||
}
|
||||
}
|
||||
return {
|
||||
id: file.id,
|
||||
name: file.name,
|
||||
type: file.type,
|
||||
size: file.size,
|
||||
dataUrl,
|
||||
}
|
||||
})
|
||||
)
|
||||
|
||||
// Add user message with attachments (include all files, even non-images without dataUrl)
|
||||
addMessage({
|
||||
content:
|
||||
sentMessage || (chatFiles.length > 0 ? `Uploaded ${chatFiles.length} file(s)` : ''),
|
||||
workflowId: activeWorkflowId,
|
||||
type: 'user',
|
||||
attachments: attachmentsWithData,
|
||||
})
|
||||
|
||||
// Prepare workflow input
|
||||
@@ -305,7 +331,18 @@ export function Chat({ chatMessage, setChatMessage }: ChatProps) {
|
||||
|
||||
// Check if we got a streaming response
|
||||
if (result && 'stream' in result && result.stream instanceof ReadableStream) {
|
||||
const messageIdMap = new Map<string, string>()
|
||||
// Create a single message for all outputs (like chat client does)
|
||||
const responseMessageId = crypto.randomUUID()
|
||||
let accumulatedContent = ''
|
||||
|
||||
// Add initial streaming message
|
||||
addMessage({
|
||||
id: responseMessageId,
|
||||
content: '',
|
||||
workflowId: activeWorkflowId,
|
||||
type: 'workflow',
|
||||
isStreaming: true,
|
||||
})
|
||||
|
||||
const reader = result.stream.getReader()
|
||||
const decoder = new TextDecoder()
|
||||
@@ -314,8 +351,8 @@ export function Chat({ chatMessage, setChatMessage }: ChatProps) {
|
||||
while (true) {
|
||||
const { done, value } = await reader.read()
|
||||
if (done) {
|
||||
// Finalize all streaming messages
|
||||
messageIdMap.forEach((id) => finalizeMessageStream(id))
|
||||
// Finalize the streaming message
|
||||
finalizeMessageStream(responseMessageId)
|
||||
break
|
||||
}
|
||||
|
||||
@@ -324,92 +361,38 @@ export function Chat({ chatMessage, setChatMessage }: ChatProps) {
|
||||
|
||||
for (const line of lines) {
|
||||
if (line.startsWith('data: ')) {
|
||||
try {
|
||||
const json = JSON.parse(line.substring(6))
|
||||
const { blockId, chunk: contentChunk, event, data } = json
|
||||
const data = line.substring(6)
|
||||
|
||||
if (event === 'final' && data) {
|
||||
const result = data as ExecutionResult
|
||||
if (data === '[DONE]') {
|
||||
continue
|
||||
}
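// Expected SSE payload shapes (a sketch inferred from the destructuring below;
// the actual server contract is not shown in this diff):
//   data: {"blockId":"<id>","chunk":"partial text"}        -> streamed content chunk
//   data: {"event":"final","data":{ ...ExecutionResult }}  -> terminal event with the full result
//   data: [DONE]                                           -> end-of-stream sentinel handled above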
|
||||
|
||||
try {
|
||||
const json = JSON.parse(data)
|
||||
const { blockId, chunk: contentChunk, event, data: eventData } = json
|
||||
|
||||
if (event === 'final' && eventData) {
|
||||
const result = eventData as ExecutionResult
|
||||
|
||||
// If final result is a failure, surface error and stop
|
||||
if ('success' in result && !result.success) {
|
||||
addMessage({
|
||||
content: `Error: ${result.error || 'Workflow execution failed'}`,
|
||||
workflowId: activeWorkflowId,
|
||||
type: 'workflow',
|
||||
})
|
||||
|
||||
// Clear any existing message streams
|
||||
for (const msgId of messageIdMap.values()) {
|
||||
finalizeMessageStream(msgId)
|
||||
}
|
||||
messageIdMap.clear()
|
||||
// Update the existing message with error
|
||||
appendMessageContent(
|
||||
responseMessageId,
|
||||
`${accumulatedContent ? '\n\n' : ''}Error: ${result.error || 'Workflow execution failed'}`
|
||||
)
|
||||
finalizeMessageStream(responseMessageId)
|
||||
|
||||
// Stop processing
|
||||
return
|
||||
}
|
||||
|
||||
const nonStreamingLogs =
|
||||
result.logs?.filter((log) => !messageIdMap.has(log.blockId)) || []
|
||||
|
||||
if (nonStreamingLogs.length > 0) {
|
||||
const outputsToRender = selectedOutputs.filter((outputId) => {
|
||||
const blockIdForOutput = extractBlockIdFromOutputId(outputId)
|
||||
return nonStreamingLogs.some((log) => log.blockId === blockIdForOutput)
|
||||
})
|
||||
|
||||
for (const outputId of outputsToRender) {
|
||||
const blockIdForOutput = extractBlockIdFromOutputId(outputId)
|
||||
const path = extractPathFromOutputId(outputId, blockIdForOutput)
|
||||
const log = nonStreamingLogs.find((l) => l.blockId === blockIdForOutput)
|
||||
if (log) {
|
||||
let output = log.output
|
||||
if (path) {
|
||||
output = parseOutputContentSafely(output)
|
||||
const pathParts = path.split('.')
|
||||
let current = output
|
||||
for (const part of pathParts) {
|
||||
if (current && typeof current === 'object' && part in current) {
|
||||
current = current[part]
|
||||
} else {
|
||||
current = undefined
|
||||
break
|
||||
}
|
||||
}
|
||||
output = current
|
||||
}
|
||||
if (output !== undefined) {
|
||||
addMessage({
|
||||
content: typeof output === 'string' ? output : JSON.stringify(output),
|
||||
workflowId: activeWorkflowId,
|
||||
type: 'workflow',
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// Final event just marks completion, content already streamed
|
||||
finalizeMessageStream(responseMessageId)
|
||||
} else if (blockId && contentChunk) {
|
||||
if (!messageIdMap.has(blockId)) {
|
||||
const newMessageId = crypto.randomUUID()
|
||||
messageIdMap.set(blockId, newMessageId)
|
||||
addMessage({
|
||||
id: newMessageId,
|
||||
content: contentChunk,
|
||||
workflowId: activeWorkflowId,
|
||||
type: 'workflow',
|
||||
isStreaming: true,
|
||||
})
|
||||
} else {
|
||||
const existingMessageId = messageIdMap.get(blockId)
|
||||
if (existingMessageId) {
|
||||
appendMessageContent(existingMessageId, contentChunk)
|
||||
}
|
||||
}
|
||||
} else if (blockId && event === 'end') {
|
||||
const existingMessageId = messageIdMap.get(blockId)
|
||||
if (existingMessageId) {
|
||||
finalizeMessageStream(existingMessageId)
|
||||
}
|
||||
// Accumulate all content into the single message
|
||||
accumulatedContent += contentChunk
|
||||
appendMessageContent(responseMessageId, contentChunk)
|
||||
}
|
||||
} catch (e) {
|
||||
logger.error('Error parsing stream data:', e)
|
||||
@@ -669,66 +652,212 @@ export function Chat({ chatMessage, setChatMessage }: ChatProps) {
|
||||
|
||||
if (validNewFiles.length > 0) {
|
||||
setChatFiles([...chatFiles, ...validNewFiles])
|
||||
setUploadErrors([]) // Clear errors when files are successfully added
|
||||
}
|
||||
}
|
||||
}
|
||||
}}
|
||||
>
|
||||
{/* File upload section */}
|
||||
<div className='mb-2'>
|
||||
{uploadErrors.length > 0 && (
|
||||
<div className='mb-2'>
|
||||
<Notice variant='error' title='File upload error'>
|
||||
<ul className='list-disc pl-5'>
|
||||
{uploadErrors.map((err, idx) => (
|
||||
<li key={idx}>{err}</li>
|
||||
))}
|
||||
</ul>
|
||||
</Notice>
|
||||
{/* Error messages */}
|
||||
{uploadErrors.length > 0 && (
|
||||
<div className='mb-2'>
|
||||
<div className='rounded-lg border border-red-200 bg-red-50 p-3 dark:border-red-800/50 dark:bg-red-950/20'>
|
||||
<div className='flex items-start gap-2'>
|
||||
<AlertCircle className='mt-0.5 h-4 w-4 shrink-0 text-red-600 dark:text-red-400' />
|
||||
<div className='flex-1'>
|
||||
<div className='mb-1 font-medium text-red-800 text-sm dark:text-red-300'>
|
||||
File upload error
|
||||
</div>
|
||||
<div className='space-y-1'>
|
||||
{uploadErrors.map((err, idx) => (
|
||||
<div key={idx} className='text-red-700 text-sm dark:text-red-400'>
|
||||
{err}
|
||||
</div>
|
||||
))}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Combined input container matching copilot style */}
|
||||
<div
|
||||
className={`rounded-[8px] border border-[#E5E5E5] bg-[#FFFFFF] p-2 shadow-xs transition-all duration-200 dark:border-[#414141] dark:bg-[var(--surface-elevated)] ${
|
||||
isDragOver
|
||||
? 'border-[var(--brand-primary-hover-hex)] bg-purple-50/50 dark:border-[var(--brand-primary-hover-hex)] dark:bg-purple-950/20'
|
||||
: ''
|
||||
}`}
|
||||
>
|
||||
{/* File thumbnails */}
|
||||
{chatFiles.length > 0 && (
|
||||
<div className='mb-2 flex flex-wrap gap-1.5'>
|
||||
{chatFiles.map((file) => {
|
||||
const isImage = file.type.startsWith('image/')
|
||||
const previewUrl = isImage ? URL.createObjectURL(file.file) : null
|
||||
const getFileIcon = (type: string) => {
|
||||
if (type.includes('pdf'))
|
||||
return <FileText className='h-5 w-5 text-muted-foreground' />
|
||||
if (type.startsWith('image/'))
|
||||
return <Image className='h-5 w-5 text-muted-foreground' />
|
||||
if (type.includes('text') || type.includes('json'))
|
||||
return <FileText className='h-5 w-5 text-muted-foreground' />
|
||||
return <File className='h-5 w-5 text-muted-foreground' />
|
||||
}
|
||||
const formatFileSize = (bytes: number) => {
|
||||
if (bytes === 0) return '0 B'
|
||||
const k = 1024
|
||||
const sizes = ['B', 'KB', 'MB', 'GB']
|
||||
const i = Math.floor(Math.log(bytes) / Math.log(k))
|
||||
return `${Math.round((bytes / k ** i) * 10) / 10} ${sizes[i]}`
|
||||
}
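// Worked example (sketch): formatFileSize(1536) -> '1.5 KB', formatFileSize(0) -> '0 B'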
|
||||
|
||||
return (
|
||||
<div
|
||||
key={file.id}
|
||||
className={`group relative overflow-hidden rounded-md border border-border/50 bg-muted/20 ${
|
||||
previewUrl
|
||||
? 'h-16 w-16'
|
||||
: 'flex h-16 min-w-[120px] max-w-[200px] items-center gap-2 px-2'
|
||||
}`}
|
||||
>
|
||||
{previewUrl ? (
|
||||
<img
|
||||
src={previewUrl}
|
||||
alt={file.name}
|
||||
className='h-full w-full object-cover'
|
||||
/>
|
||||
) : (
|
||||
<>
|
||||
<div className='flex h-8 w-8 flex-shrink-0 items-center justify-center rounded bg-background/50'>
|
||||
{getFileIcon(file.type)}
|
||||
</div>
|
||||
<div className='min-w-0 flex-1'>
|
||||
<div className='truncate font-medium text-foreground text-xs'>
|
||||
{file.name}
|
||||
</div>
|
||||
<div className='text-[10px] text-muted-foreground'>
|
||||
{formatFileSize(file.size)}
|
||||
</div>
|
||||
</div>
|
||||
</>
|
||||
)}
|
||||
|
||||
{/* Remove button */}
|
||||
<Button
|
||||
variant='ghost'
|
||||
size='icon'
|
||||
onClick={(e) => {
|
||||
e.stopPropagation()
|
||||
if (previewUrl) URL.revokeObjectURL(previewUrl)
|
||||
setChatFiles(chatFiles.filter((f) => f.id !== file.id))
|
||||
}}
|
||||
className='absolute top-0.5 right-0.5 h-5 w-5 bg-gray-800/80 p-0 text-white opacity-0 transition-opacity hover:bg-gray-800/80 hover:text-white group-hover:opacity-100 dark:bg-black/70 dark:hover:bg-black/70 dark:hover:text-white'
|
||||
>
|
||||
<X className='h-3 w-3' />
|
||||
</Button>
|
||||
</div>
|
||||
)
|
||||
})}
|
||||
</div>
|
||||
)}
|
||||
<ChatFileUpload
|
||||
files={chatFiles}
|
||||
onFilesChange={(files) => {
|
||||
setChatFiles(files)
|
||||
}}
|
||||
maxFiles={5}
|
||||
maxSize={10}
|
||||
disabled={!activeWorkflowId || isExecuting || isUploadingFiles}
|
||||
onError={(errors) => setUploadErrors(errors)}
|
||||
/>
|
||||
</div>
|
||||
|
||||
<div className='flex gap-2'>
|
||||
<Input
|
||||
ref={inputRef}
|
||||
value={chatMessage}
|
||||
onChange={(e) => {
|
||||
setChatMessage(e.target.value)
|
||||
setHistoryIndex(-1) // Reset history index when typing
|
||||
}}
|
||||
onKeyDown={handleKeyPress}
|
||||
placeholder={isDragOver ? 'Drop files here...' : 'Type a message...'}
|
||||
className={`h-9 flex-1 rounded-lg border-[#E5E5E5] bg-[#FFFFFF] text-muted-foreground shadow-xs focus-visible:ring-0 focus-visible:ring-offset-0 dark:border-[#414141] dark:bg-[var(--surface-elevated)] ${
|
||||
isDragOver
|
||||
? 'border-[var(--brand-primary-hover-hex)] bg-purple-50/50 dark:border-[var(--brand-primary-hover-hex)] dark:bg-purple-950/20'
|
||||
: ''
|
||||
}`}
|
||||
disabled={!activeWorkflowId || isExecuting || isUploadingFiles}
|
||||
/>
|
||||
<Button
|
||||
onClick={handleSendMessage}
|
||||
size='icon'
|
||||
disabled={
|
||||
(!chatMessage.trim() && chatFiles.length === 0) ||
|
||||
!activeWorkflowId ||
|
||||
isExecuting ||
|
||||
isUploadingFiles
|
||||
}
|
||||
className='h-9 w-9 rounded-lg bg-[var(--brand-primary-hover-hex)] text-white shadow-[0_0_0_0_var(--brand-primary-hover-hex)] transition-all duration-200 hover:bg-[var(--brand-primary-hover-hex)] hover:shadow-[0_0_0_4px_rgba(127,47,255,0.15)]'
|
||||
>
|
||||
<ArrowUp className='h-4 w-4' />
|
||||
</Button>
|
||||
{/* Input row */}
|
||||
<div className='flex items-center gap-1'>
|
||||
{/* Attach button */}
|
||||
<Button
|
||||
variant='ghost'
|
||||
size='icon'
|
||||
onClick={() => document.getElementById('chat-file-input')?.click()}
|
||||
disabled={
|
||||
!activeWorkflowId || isExecuting || isUploadingFiles || chatFiles.length >= 5
|
||||
}
|
||||
className='h-6 w-6 shrink-0 text-muted-foreground hover:text-foreground'
|
||||
title='Attach files'
|
||||
>
|
||||
<Paperclip className='h-3 w-3' />
|
||||
</Button>
|
||||
|
||||
{/* Hidden file input */}
|
||||
<input
|
||||
id='chat-file-input'
|
||||
type='file'
|
||||
multiple
|
||||
onChange={(e) => {
|
||||
const files = e.target.files
|
||||
if (!files) return
|
||||
|
||||
const newFiles: ChatFile[] = []
|
||||
const errors: string[] = []
|
||||
for (let i = 0; i < files.length; i++) {
|
||||
if (chatFiles.length + newFiles.length >= 5) {
|
||||
errors.push('Maximum 5 files allowed')
|
||||
break
|
||||
}
|
||||
const file = files[i]
|
||||
if (file.size > 10 * 1024 * 1024) {
|
||||
errors.push(`${file.name} is too large (max 10MB)`)
|
||||
continue
|
||||
}
|
||||
|
||||
// Check for duplicates
|
||||
const isDuplicate = chatFiles.some(
|
||||
(existingFile) =>
|
||||
existingFile.name === file.name && existingFile.size === file.size
|
||||
)
|
||||
if (isDuplicate) {
|
||||
errors.push(`${file.name} already added`)
|
||||
continue
|
||||
}
|
||||
|
||||
newFiles.push({
|
||||
id: crypto.randomUUID(),
|
||||
name: file.name,
|
||||
size: file.size,
|
||||
type: file.type,
|
||||
file,
|
||||
})
|
||||
}
|
||||
if (errors.length > 0) setUploadErrors(errors)
|
||||
if (newFiles.length > 0) {
|
||||
setChatFiles([...chatFiles, ...newFiles])
|
||||
setUploadErrors([]) // Clear errors when files are successfully added
|
||||
}
|
||||
e.target.value = ''
|
||||
}}
|
||||
className='hidden'
|
||||
disabled={!activeWorkflowId || isExecuting || isUploadingFiles}
|
||||
/>
|
||||
|
||||
{/* Text input */}
|
||||
<Input
|
||||
ref={inputRef}
|
||||
value={chatMessage}
|
||||
onChange={(e) => {
|
||||
setChatMessage(e.target.value)
|
||||
setHistoryIndex(-1)
|
||||
}}
|
||||
onKeyDown={handleKeyPress}
|
||||
placeholder={isDragOver ? 'Drop files here...' : 'Type a message...'}
|
||||
className='h-8 flex-1 border-0 bg-transparent font-sans text-foreground text-sm shadow-none placeholder:text-muted-foreground focus-visible:ring-0 focus-visible:ring-offset-0'
|
||||
disabled={!activeWorkflowId || isExecuting || isUploadingFiles}
|
||||
/>
|
||||
|
||||
{/* Send button */}
|
||||
<Button
|
||||
onClick={handleSendMessage}
|
||||
size='icon'
|
||||
disabled={
|
||||
(!chatMessage.trim() && chatFiles.length === 0) ||
|
||||
!activeWorkflowId ||
|
||||
isExecuting ||
|
||||
isUploadingFiles
|
||||
}
|
||||
className='h-6 w-6 shrink-0 rounded-full bg-[var(--brand-primary-hover-hex)] text-white shadow-[0_0_0_0_var(--brand-primary-hover-hex)] transition-all duration-200 hover:bg-[var(--brand-primary-hover-hex)] hover:shadow-[0_0_0_4px_rgba(127,47,255,0.15)]'
|
||||
>
|
||||
<ArrowUp className='h-3 w-3' />
|
||||
</Button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@@ -1,4 +1,13 @@
|
||||
import { useMemo } from 'react'
|
||||
import { File, FileText, Image as ImageIcon } from 'lucide-react'
|
||||
|
||||
interface ChatAttachment {
|
||||
id: string
|
||||
name: string
|
||||
type: string
|
||||
dataUrl: string
|
||||
size?: number
|
||||
}
|
||||
|
||||
interface ChatMessageProps {
|
||||
message: {
|
||||
@@ -7,6 +16,7 @@ interface ChatMessageProps {
|
||||
timestamp: string | Date
|
||||
type: 'user' | 'workflow'
|
||||
isStreaming?: boolean
|
||||
attachments?: ChatAttachment[]
|
||||
}
|
||||
}
|
||||
|
||||
@@ -58,12 +68,81 @@ export function ChatMessage({ message }: ChatMessageProps) {
|
||||
if (message.type === 'user') {
|
||||
return (
|
||||
<div className='w-full py-2'>
|
||||
{/* File attachments displayed above the message, completely separate from message box */}
|
||||
{message.attachments && message.attachments.length > 0 && (
|
||||
<div className='mb-1 flex justify-end'>
|
||||
<div className='flex flex-wrap gap-1.5'>
|
||||
{message.attachments.map((attachment) => {
|
||||
const isImage = attachment.type.startsWith('image/')
|
||||
const getFileIcon = (type: string) => {
|
||||
if (type.includes('pdf'))
|
||||
return <FileText className='h-5 w-5 text-muted-foreground' />
|
||||
if (type.startsWith('image/'))
|
||||
return <ImageIcon className='h-5 w-5 text-muted-foreground' />
|
||||
if (type.includes('text') || type.includes('json'))
|
||||
return <FileText className='h-5 w-5 text-muted-foreground' />
|
||||
return <File className='h-5 w-5 text-muted-foreground' />
|
||||
}
|
||||
const formatFileSize = (bytes?: number) => {
|
||||
if (!bytes || bytes === 0) return ''
|
||||
const k = 1024
|
||||
const sizes = ['B', 'KB', 'MB', 'GB']
|
||||
const i = Math.floor(Math.log(bytes) / Math.log(k))
|
||||
return `${Math.round((bytes / k ** i) * 10) / 10} ${sizes[i]}`
|
||||
}
|
||||
|
||||
return (
|
||||
<div
|
||||
key={attachment.id}
|
||||
className={`relative overflow-hidden rounded-md border border-border/50 bg-muted/20 ${
|
||||
attachment.dataUrl?.trim() ? 'cursor-pointer' : ''
|
||||
} ${isImage ? 'h-16 w-16' : 'flex h-16 min-w-[120px] max-w-[200px] items-center gap-2 px-2'}`}
|
||||
onClick={(e) => {
|
||||
if (attachment.dataUrl?.trim()) {
|
||||
e.preventDefault()
|
||||
window.open(attachment.dataUrl, '_blank')
|
||||
}
|
||||
}}
|
||||
>
|
||||
{isImage && attachment.dataUrl ? (
|
||||
<img
|
||||
src={attachment.dataUrl}
|
||||
alt={attachment.name}
|
||||
className='h-full w-full object-cover'
|
||||
/>
|
||||
) : (
|
||||
<>
|
||||
<div className='flex h-8 w-8 flex-shrink-0 items-center justify-center rounded bg-background/50'>
|
||||
{getFileIcon(attachment.type)}
|
||||
</div>
|
||||
<div className='min-w-0 flex-1'>
|
||||
<div className='truncate font-medium text-foreground text-xs'>
|
||||
{attachment.name}
|
||||
</div>
|
||||
{attachment.size && (
|
||||
<div className='text-[10px] text-muted-foreground'>
|
||||
{formatFileSize(attachment.size)}
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</>
|
||||
)}
|
||||
</div>
|
||||
)
|
||||
})}
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
|
||||
<div className='flex justify-end'>
|
||||
<div className='max-w-[80%]'>
|
||||
<div className='rounded-[10px] bg-secondary px-3 py-2'>
|
||||
<div className='whitespace-pre-wrap break-words font-normal text-foreground text-sm leading-normal'>
|
||||
<WordWrap text={formattedContent} />
|
||||
</div>
|
||||
{/* Render text content if present and not just file count message */}
|
||||
{formattedContent && !formattedContent.startsWith('Uploaded') && (
|
||||
<div className='whitespace-pre-wrap break-words font-normal text-foreground text-sm leading-normal'>
|
||||
<WordWrap text={formattedContent} />
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
@@ -78,7 +157,7 @@ export function ChatMessage({ message }: ChatMessageProps) {
|
||||
<div className='whitespace-pre-wrap break-words text-foreground'>
|
||||
<WordWrap text={formattedContent} />
|
||||
{message.isStreaming && (
|
||||
<span className='ml-1 inline-block h-4 w-2 animate-pulse bg-primary' />
|
||||
<span className='ml-1 inline-block h-4 w-2 animate-pulse bg-gray-400 dark:bg-gray-300' />
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import { useEffect, useMemo, useRef, useState } from 'react'
|
||||
import { Check, ChevronDown } from 'lucide-react'
|
||||
import { createPortal } from 'react-dom'
|
||||
import { extractFieldsFromSchema, parseResponseFormatSafely } from '@/lib/response-format'
|
||||
import { cn } from '@/lib/utils'
|
||||
import { getBlock } from '@/blocks'
|
||||
@@ -13,6 +14,7 @@ interface OutputSelectProps {
|
||||
onOutputSelect: (outputIds: string[]) => void
|
||||
disabled?: boolean
|
||||
placeholder?: string
|
||||
valueMode?: 'id' | 'label'
|
||||
}
|
||||
|
||||
export function OutputSelect({
|
||||
@@ -21,11 +23,46 @@ export function OutputSelect({
|
||||
onOutputSelect,
|
||||
disabled = false,
|
||||
placeholder = 'Select output sources',
|
||||
valueMode = 'id',
|
||||
}: OutputSelectProps) {
|
||||
const [isOutputDropdownOpen, setIsOutputDropdownOpen] = useState(false)
|
||||
const dropdownRef = useRef<HTMLDivElement>(null)
|
||||
const portalRef = useRef<HTMLDivElement>(null)
|
||||
const [portalStyle, setPortalStyle] = useState<{
|
||||
top: number
|
||||
left: number
|
||||
width: number
|
||||
height: number
|
||||
} | null>(null)
|
||||
const blocks = useWorkflowStore((state) => state.blocks)
|
||||
const { isShowingDiff, isDiffReady, diffWorkflow } = useWorkflowDiffStore()
|
||||
// Find all scrollable ancestors so the dropdown can stay pinned on scroll
|
||||
const getScrollableAncestors = (el: HTMLElement | null): (HTMLElement | Window)[] => {
|
||||
const ancestors: (HTMLElement | Window)[] = []
|
||||
let node: HTMLElement | null = el?.parentElement || null
|
||||
const isScrollable = (elem: HTMLElement) => {
|
||||
const style = window.getComputedStyle(elem)
|
||||
const overflowY = style.overflowY
|
||||
const overflow = style.overflow
|
||||
const hasScroll = elem.scrollHeight > elem.clientHeight
|
||||
return (
|
||||
hasScroll &&
|
||||
(overflowY === 'auto' ||
|
||||
overflowY === 'scroll' ||
|
||||
overflow === 'auto' ||
|
||||
overflow === 'scroll')
|
||||
)
|
||||
}
|
||||
|
||||
while (node && node !== document.body) {
|
||||
if (isScrollable(node)) ancestors.push(node)
|
||||
node = node.parentElement
|
||||
}
|
||||
|
||||
// Always include window as a fallback
|
||||
ancestors.push(window)
|
||||
return ancestors
|
||||
}
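// Sketch: for a trigger nested inside a scrollable dialog body this returns something like
// [dialogBodyElement, window] (hypothetical names), so every scroll source repositions the portal.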
|
||||
|
||||
// Track subblock store state to ensure proper reactivity
|
||||
const subBlockValues = useSubBlockStore((state) =>
|
||||
@@ -166,28 +203,31 @@ export function OutputSelect({
|
||||
return outputs
|
||||
}, [workflowBlocks, workflowId, isShowingDiff, isDiffReady, diffWorkflow, blocks, subBlockValues])
|
||||
|
||||
// Utility to check selected by id or label
|
||||
const isSelectedValue = (o: { id: string; label: string }) =>
|
||||
selectedOutputs.includes(o.id) || selectedOutputs.includes(o.label)
|
||||
|
||||
// Get selected outputs display text
|
||||
const selectedOutputsDisplayText = useMemo(() => {
|
||||
if (!selectedOutputs || selectedOutputs.length === 0) {
|
||||
return placeholder
|
||||
}
|
||||
|
||||
// Ensure all selected outputs exist in the workflowOutputs array
|
||||
const validOutputs = selectedOutputs.filter((id) => workflowOutputs.some((o) => o.id === id))
|
||||
// Ensure all selected outputs exist in the workflowOutputs array by id or label
|
||||
const validOutputs = selectedOutputs.filter((val) =>
|
||||
workflowOutputs.some((o) => o.id === val || o.label === val)
|
||||
)
|
||||
|
||||
if (validOutputs.length === 0) {
|
||||
return placeholder
|
||||
}
|
||||
|
||||
if (validOutputs.length === 1) {
|
||||
const output = workflowOutputs.find((o) => o.id === validOutputs[0])
|
||||
const output = workflowOutputs.find(
|
||||
(o) => o.id === validOutputs[0] || o.label === validOutputs[0]
|
||||
)
|
||||
if (output) {
|
||||
// Add defensive check for output.blockName
|
||||
const blockNameText =
|
||||
output.blockName && typeof output.blockName === 'string'
|
||||
? output.blockName.replace(/\s+/g, '').toLowerCase()
|
||||
: `block-${output.blockId}`
|
||||
return `${blockNameText}.${output.path}`
|
||||
return output.label
|
||||
}
|
||||
return placeholder
|
||||
}
|
||||
@@ -199,10 +239,14 @@ export function OutputSelect({
|
||||
const selectedOutputInfo = useMemo(() => {
|
||||
if (!selectedOutputs || selectedOutputs.length === 0) return null
|
||||
|
||||
const validOutputs = selectedOutputs.filter((id) => workflowOutputs.some((o) => o.id === id))
|
||||
const validOutputs = selectedOutputs.filter((val) =>
|
||||
workflowOutputs.some((o) => o.id === val || o.label === val)
|
||||
)
|
||||
if (validOutputs.length === 0) return null
|
||||
|
||||
const output = workflowOutputs.find((o) => o.id === validOutputs[0])
|
||||
const output = workflowOutputs.find(
|
||||
(o) => o.id === validOutputs[0] || o.label === validOutputs[0]
|
||||
)
|
||||
if (!output) return null
|
||||
|
||||
return {
|
||||
@@ -295,7 +339,10 @@ export function OutputSelect({
|
||||
// Close dropdown when clicking outside
|
||||
useEffect(() => {
|
||||
const handleClickOutside = (event: MouseEvent) => {
|
||||
if (dropdownRef.current && !dropdownRef.current.contains(event.target as Node)) {
|
||||
const target = event.target as Node
|
||||
const insideTrigger = dropdownRef.current?.contains(target)
|
||||
const insidePortal = portalRef.current?.contains(target)
|
||||
if (!insideTrigger && !insidePortal) {
|
||||
setIsOutputDropdownOpen(false)
|
||||
}
|
||||
}
|
||||
@@ -306,15 +353,52 @@ export function OutputSelect({
|
||||
}
|
||||
}, [])
|
||||
|
||||
// Position the portal dropdown relative to the trigger button
|
||||
useEffect(() => {
|
||||
const updatePosition = () => {
|
||||
if (!isOutputDropdownOpen || !dropdownRef.current) return
|
||||
const rect = dropdownRef.current.getBoundingClientRect()
|
||||
const available = Math.max(140, window.innerHeight - rect.bottom - 12)
|
||||
const height = Math.min(available, 240)
|
||||
setPortalStyle({ top: rect.bottom + 4, left: rect.left, width: rect.width, height })
|
||||
}
|
||||
|
||||
let attachedScrollTargets: (HTMLElement | Window)[] = []
|
||||
let rafId: number | null = null
|
||||
if (isOutputDropdownOpen) {
|
||||
updatePosition()
|
||||
window.addEventListener('resize', updatePosition)
|
||||
attachedScrollTargets = getScrollableAncestors(dropdownRef.current)
|
||||
attachedScrollTargets.forEach((target) =>
|
||||
target.addEventListener('scroll', updatePosition, { passive: true })
|
||||
)
|
||||
const loop = () => {
|
||||
updatePosition()
|
||||
rafId = requestAnimationFrame(loop)
|
||||
}
|
||||
rafId = requestAnimationFrame(loop)
|
||||
}
|
||||
|
||||
return () => {
|
||||
window.removeEventListener('resize', updatePosition)
|
||||
attachedScrollTargets.forEach((target) =>
|
||||
target.removeEventListener('scroll', updatePosition)
|
||||
)
|
||||
if (rafId) cancelAnimationFrame(rafId)
|
||||
}
|
||||
}, [isOutputDropdownOpen])
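// Worked example (sketch): with window.innerHeight = 800 and rect.bottom = 700,
// available = max(140, 800 - 700 - 12) = 140 and height = min(140, 240) = 140,
// so the dropdown is clamped to 140px and scrolls internally.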
|
||||
|
||||
// Handle output selection - toggle selection
|
||||
const handleOutputSelection = (value: string) => {
|
||||
const emittedValue =
|
||||
valueMode === 'label' ? value : workflowOutputs.find((o) => o.label === value)?.id || value
|
||||
let newSelectedOutputs: string[]
|
||||
const index = selectedOutputs.indexOf(value)
|
||||
const index = selectedOutputs.indexOf(emittedValue)
|
||||
|
||||
if (index === -1) {
|
||||
newSelectedOutputs = [...new Set([...selectedOutputs, value])]
|
||||
newSelectedOutputs = [...new Set([...selectedOutputs, emittedValue])]
|
||||
} else {
|
||||
newSelectedOutputs = selectedOutputs.filter((id) => id !== value)
|
||||
newSelectedOutputs = selectedOutputs.filter((id) => id !== emittedValue)
|
||||
}
|
||||
|
||||
onOutputSelect(newSelectedOutputs)
|
||||
@@ -359,48 +443,73 @@ export function OutputSelect({
|
||||
/>
|
||||
</button>
|
||||
|
||||
{isOutputDropdownOpen && workflowOutputs.length > 0 && (
|
||||
<div className='absolute left-0 z-50 mt-1 w-full overflow-hidden rounded-[8px] border border-[#E5E5E5] bg-[#FFFFFF] pt-1 shadow-xs dark:border-[#414141] dark:bg-[var(--surface-elevated)]'>
|
||||
<div className='max-h-[230px] overflow-y-auto'>
|
||||
{Object.entries(groupedOutputs).map(([blockName, outputs]) => (
|
||||
<div key={blockName}>
|
||||
<div className='border-[#E5E5E5] border-t px-3 pt-1.5 pb-0.5 font-normal text-muted-foreground text-xs first:border-t-0 dark:border-[#414141]'>
|
||||
{blockName}
|
||||
</div>
|
||||
<div>
|
||||
{outputs.map((output) => (
|
||||
<button
|
||||
type='button'
|
||||
key={output.id}
|
||||
onClick={() => handleOutputSelection(output.id)}
|
||||
className={cn(
|
||||
'flex w-full items-center gap-2 px-3 py-1.5 text-left font-normal text-sm',
|
||||
'hover:bg-accent hover:text-accent-foreground',
|
||||
'focus:bg-accent focus:text-accent-foreground focus:outline-none'
|
||||
)}
|
||||
>
|
||||
<div
|
||||
className='flex h-5 w-5 flex-shrink-0 items-center justify-center rounded'
|
||||
style={{
|
||||
backgroundColor: getOutputColor(output.blockId, output.blockType),
|
||||
}}
|
||||
>
|
||||
<span className='h-3 w-3 font-bold text-white text-xs'>
|
||||
{blockName.charAt(0).toUpperCase()}
|
||||
</span>
|
||||
</div>
|
||||
<span className='flex-1 truncate'>{output.path}</span>
|
||||
{selectedOutputs.includes(output.id) && (
|
||||
<Check className='h-4 w-4 flex-shrink-0 text-muted-foreground' />
|
||||
)}
|
||||
</button>
|
||||
))}
|
||||
</div>
|
||||
{isOutputDropdownOpen &&
|
||||
workflowOutputs.length > 0 &&
|
||||
portalStyle &&
|
||||
createPortal(
|
||||
<div
|
||||
ref={portalRef}
|
||||
style={{
|
||||
position: 'fixed',
|
||||
top: portalStyle.top - 1, // overlap border by 1px to avoid visible gap
|
||||
left: portalStyle.left,
|
||||
width: portalStyle.width,
|
||||
zIndex: 2147483647,
|
||||
pointerEvents: 'auto',
|
||||
}}
|
||||
className='mt-0'
|
||||
data-rs-scroll-lock-ignore
|
||||
>
|
||||
<div className='overflow-hidden rounded-[8px] border border-[#E5E5E5] bg-[#FFFFFF] pt-1 shadow-xs dark:border-[#414141] dark:bg-[var(--surface-elevated)]'>
|
||||
<div
|
||||
className='overflow-y-auto overscroll-contain'
|
||||
style={{ maxHeight: portalStyle.height }}
|
||||
onWheel={(e) => {
|
||||
// Keep wheel scroll inside the dropdown and avoid dialog/body scroll locks
|
||||
e.stopPropagation()
|
||||
}}
|
||||
>
|
||||
{Object.entries(groupedOutputs).map(([blockName, outputs]) => (
|
||||
<div key={blockName}>
|
||||
<div className='border-[#E5E5E5] border-t px-3 pt-1.5 pb-0.5 font-normal text-muted-foreground text-xs first:border-t-0 dark:border-[#414141]'>
|
||||
{blockName}
|
||||
</div>
|
||||
<div>
|
||||
{outputs.map((output) => (
|
||||
<button
|
||||
type='button'
|
||||
key={output.id}
|
||||
onClick={() => handleOutputSelection(output.label)}
|
||||
className={cn(
|
||||
'flex w-full items-center gap-2 px-3 py-1.5 text-left font-normal text-sm',
|
||||
'hover:bg-accent hover:text-accent-foreground',
|
||||
'focus:bg-accent focus:text-accent-foreground focus:outline-none'
|
||||
)}
|
||||
>
|
||||
<div
|
||||
className='flex h-5 w-5 flex-shrink-0 items-center justify-center rounded'
|
||||
style={{
|
||||
backgroundColor: getOutputColor(output.blockId, output.blockType),
|
||||
}}
|
||||
>
|
||||
<span className='h-3 w-3 font-bold text-white text-xs'>
|
||||
{blockName.charAt(0).toUpperCase()}
|
||||
</span>
|
||||
</div>
|
||||
<span className='flex-1 truncate'>{output.path}</span>
|
||||
{isSelectedValue(output) && (
|
||||
<Check className='h-4 w-4 flex-shrink-0 text-muted-foreground' />
|
||||
)}
|
||||
</button>
|
||||
))}
|
||||
</div>
|
||||
</div>
|
||||
))}
|
||||
</div>
|
||||
))}
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</div>,
|
||||
document.body
|
||||
)}
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
@@ -13,13 +13,16 @@ import { checkTagTrigger, TagDropdown } from '@/components/ui/tag-dropdown'
|
||||
import { CodeLanguage } from '@/lib/execution/languages'
|
||||
import { createLogger } from '@/lib/logs/console/logger'
|
||||
import { cn } from '@/lib/utils'
|
||||
import { isLikelyReferenceSegment, SYSTEM_REFERENCE_PREFIXES } from '@/lib/workflows/references'
|
||||
import { WandPromptBar } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/wand-prompt-bar/wand-prompt-bar'
|
||||
import { useSubBlockValue } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/workflow-block/components/sub-block/hooks/use-sub-block-value'
|
||||
import { useAccessibleReferencePrefixes } from '@/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-accessible-reference-prefixes'
|
||||
import { useWand } from '@/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-wand'
|
||||
import type { GenerationType } from '@/blocks/types'
|
||||
import { useCollaborativeWorkflow } from '@/hooks/use-collaborative-workflow'
|
||||
import { useTagSelection } from '@/hooks/use-tag-selection'
|
||||
import { useSubBlockStore } from '@/stores/workflows/subblock/store'
|
||||
import { normalizeBlockName } from '@/stores/workflows/utils'
|
||||
|
||||
const logger = createLogger('Code')
|
||||
|
||||
@@ -99,6 +102,8 @@ export function Code({
|
||||
const [activeSourceBlockId, setActiveSourceBlockId] = useState<string | null>(null)
|
||||
const [visualLineHeights, setVisualLineHeights] = useState<number[]>([])
|
||||
|
||||
const accessiblePrefixes = useAccessibleReferencePrefixes(blockId)
|
||||
|
||||
const collapsedStateKey = `${subBlockId}_collapsed`
|
||||
const isCollapsed =
|
||||
(useSubBlockStore((state) => state.getValue(blockId, collapsedStateKey)) as boolean) ?? false
|
||||
@@ -354,6 +359,30 @@ IMPORTANT FORMATTING RULES:
|
||||
}, 0)
|
||||
}
|
||||
|
||||
const shouldHighlightReference = (part: string): boolean => {
|
||||
if (!part.startsWith('<') || !part.endsWith('>')) {
|
||||
return false
|
||||
}
|
||||
|
||||
if (!isLikelyReferenceSegment(part)) {
|
||||
return false
|
||||
}
|
||||
|
||||
if (!accessiblePrefixes) {
|
||||
return true
|
||||
}
|
||||
|
||||
const inner = part.slice(1, -1)
|
||||
const [prefix] = inner.split('.')
|
||||
const normalizedPrefix = normalizeBlockName(prefix)
|
||||
|
||||
if (SYSTEM_REFERENCE_PREFIXES.has(normalizedPrefix)) {
|
||||
return true
|
||||
}
|
||||
|
||||
return accessiblePrefixes.has(normalizedPrefix)
|
||||
}
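// Sketch: '<start.input>' is highlighted when it parses as a reference and 'start' is a
// system prefix or an accessible block; an inaccessible prefix renders as plain text.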
|
||||
|
||||
const renderLineNumbers = (): ReactElement[] => {
|
||||
const numbers: ReactElement[] = []
|
||||
let lineNumber = 1
|
||||
@@ -490,13 +519,51 @@ IMPORTANT FORMATTING RULES:
|
||||
e.preventDefault()
|
||||
}
|
||||
}}
|
||||
highlight={(codeToHighlight) =>
|
||||
highlight(
|
||||
codeToHighlight,
|
||||
languages[effectiveLanguage === 'python' ? 'python' : 'javascript'],
|
||||
effectiveLanguage === 'python' ? 'python' : 'javascript'
|
||||
)
|
||||
}
|
||||
highlight={(codeToHighlight) => {
|
||||
const placeholders: { placeholder: string; original: string; type: 'var' | 'env' }[] =
|
||||
[]
|
||||
let processedCode = codeToHighlight
|
||||
|
||||
// Replace environment variables with placeholders
|
||||
processedCode = processedCode.replace(/\{\{([^}]+)\}\}/g, (match) => {
|
||||
const placeholder = `__ENV_VAR_${placeholders.length}__`
|
||||
placeholders.push({ placeholder, original: match, type: 'env' })
|
||||
return placeholder
|
||||
})
|
||||
|
||||
// Replace variable references with placeholders
|
||||
processedCode = processedCode.replace(/<([^>]+)>/g, (match) => {
|
||||
if (shouldHighlightReference(match)) {
|
||||
const placeholder = `__VAR_REF_${placeholders.length}__`
|
||||
placeholders.push({ placeholder, original: match, type: 'var' })
|
||||
return placeholder
|
||||
}
|
||||
return match
|
||||
})
|
||||
|
||||
// Apply Prism syntax highlighting
|
||||
const lang = effectiveLanguage === 'python' ? 'python' : 'javascript'
|
||||
let highlightedCode = highlight(processedCode, languages[lang], lang)
|
||||
|
||||
// Restore and highlight the placeholders
|
||||
placeholders.forEach(({ placeholder, original, type }) => {
|
||||
if (type === 'env') {
|
||||
highlightedCode = highlightedCode.replace(
|
||||
placeholder,
|
||||
`<span class="text-blue-500">${original}</span>`
|
||||
)
|
||||
} else if (type === 'var') {
|
||||
// Escape the < and > for display
|
||||
const escaped = original.replace(/</g, '&lt;').replace(/>/g, '&gt;')
|
||||
highlightedCode = highlightedCode.replace(
|
||||
placeholder,
|
||||
`<span class="text-blue-500">${escaped}</span>`
|
||||
)
|
||||
}
|
||||
})
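// Round-trip example (sketch): '{{API_KEY}}' -> '__ENV_VAR_0__' passes through Prism
// untouched, then is restored wrapped in a text-blue-500 span; variable references are
// entity-escaped before being wrapped.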
|
||||
|
||||
return highlightedCode
|
||||
}}
|
||||
padding={12}
|
||||
style={{
|
||||
fontFamily: 'inherit',
|
||||
|
||||
@@ -9,6 +9,7 @@ import { checkTagTrigger, TagDropdown } from '@/components/ui/tag-dropdown'
|
||||
import { MAX_TAG_SLOTS } from '@/lib/knowledge/consts'
|
||||
import { cn } from '@/lib/utils'
|
||||
import { useSubBlockValue } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/workflow-block/components/sub-block/hooks/use-sub-block-value'
|
||||
import { useAccessibleReferencePrefixes } from '@/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-accessible-reference-prefixes'
|
||||
import type { SubBlockConfig } from '@/blocks/types'
|
||||
import { useKnowledgeBaseTagDefinitions } from '@/hooks/use-knowledge-base-tag-definitions'
|
||||
import { useTagSelection } from '@/hooks/use-tag-selection'
|
||||
@@ -40,6 +41,7 @@ export function DocumentTagEntry({
|
||||
isConnecting = false,
|
||||
}: DocumentTagEntryProps) {
|
||||
const [storeValue, setStoreValue] = useSubBlockValue<string>(blockId, subBlock.id)
|
||||
const accessiblePrefixes = useAccessibleReferencePrefixes(blockId)
|
||||
|
||||
// Get the knowledge base ID from other sub-blocks
|
||||
const [knowledgeBaseIdValue] = useSubBlockValue(blockId, 'knowledgeBaseId')
|
||||
@@ -301,7 +303,12 @@ export function DocumentTagEntry({
|
||||
)}
|
||||
/>
|
||||
<div className='pointer-events-none absolute inset-0 flex items-center overflow-hidden bg-transparent px-3 text-sm'>
|
||||
<div className='whitespace-pre'>{formatDisplayText(cellValue)}</div>
|
||||
<div className='whitespace-pre'>
|
||||
{formatDisplayText(cellValue, {
|
||||
accessiblePrefixes,
|
||||
highlightAll: !accessiblePrefixes,
|
||||
})}
|
||||
</div>
|
||||
</div>
|
||||
{showDropdown && availableTagDefinitions.length > 0 && (
|
||||
<div className='absolute top-full left-0 z-[100] mt-1 w-full'>
|
||||
@@ -389,7 +396,10 @@ export function DocumentTagEntry({
|
||||
/>
|
||||
<div className='pointer-events-none absolute inset-0 flex items-center overflow-hidden bg-transparent px-3 text-sm'>
|
||||
<div className='whitespace-pre text-muted-foreground'>
|
||||
{formatDisplayText(cellValue)}
|
||||
{formatDisplayText(cellValue, {
|
||||
accessiblePrefixes,
|
||||
highlightAll: !accessiblePrefixes,
|
||||
})}
|
||||
</div>
|
||||
</div>
|
||||
{showTypeDropdown && !isReadOnly && (
|
||||
@@ -469,7 +479,12 @@ export function DocumentTagEntry({
|
||||
className='w-full border-0 text-transparent caret-foreground placeholder:text-muted-foreground/50 focus-visible:ring-0 focus-visible:ring-offset-0'
|
||||
/>
|
||||
<div className='pointer-events-none absolute inset-0 flex items-center overflow-hidden bg-transparent px-3 text-sm'>
|
||||
<div className='whitespace-pre'>{formatDisplayText(cellValue)}</div>
|
||||
<div className='whitespace-pre'>
|
||||
{formatDisplayText(cellValue, {
|
||||
accessiblePrefixes,
|
||||
highlightAll: !accessiblePrefixes,
|
||||
})}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</td>
|
||||
|
||||
@@ -239,7 +239,12 @@ export function KnowledgeTagFilters({
|
||||
onBlur={handleBlur}
|
||||
/>
|
||||
<div className='pointer-events-none absolute inset-0 flex items-center overflow-hidden bg-transparent px-3 text-sm'>
|
||||
<div className='whitespace-pre'>{formatDisplayText(cellValue || 'Select tag')}</div>
|
||||
<div className='whitespace-pre'>
|
||||
{formatDisplayText(cellValue || 'Select tag', {
|
||||
accessiblePrefixes,
|
||||
highlightAll: !accessiblePrefixes,
|
||||
})}
|
||||
</div>
|
||||
</div>
|
||||
{showDropdown && tagDefinitions.length > 0 && (
|
||||
<div className='absolute top-full left-0 z-[100] mt-1 w-full'>
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import { useCallback } from 'react'
|
||||
import { useParams } from 'next/navigation'
|
||||
import { formatDisplayText } from '@/components/ui/formatted-text'
|
||||
import { Input } from '@/components/ui/input'
|
||||
import { Label } from '@/components/ui/label'
|
||||
import {
|
||||
@@ -14,6 +15,7 @@ import { Switch } from '@/components/ui/switch'
|
||||
import { Textarea } from '@/components/ui/textarea'
|
||||
import { cn } from '@/lib/utils'
|
||||
import { useSubBlockValue } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/workflow-block/components/sub-block/hooks/use-sub-block-value'
|
||||
import { useAccessibleReferencePrefixes } from '@/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-accessible-reference-prefixes'
|
||||
import { useMcpTools } from '@/hooks/use-mcp-tools'
|
||||
import { formatParameterLabel } from '@/tools/params'
|
||||
|
||||
@@ -37,6 +39,7 @@ export function McpDynamicArgs({
|
||||
const { mcpTools } = useMcpTools(workspaceId)
|
||||
const [selectedTool] = useSubBlockValue(blockId, 'tool')
|
||||
const [toolArgs, setToolArgs] = useSubBlockValue(blockId, subBlockId)
|
||||
const accessiblePrefixes = useAccessibleReferencePrefixes(blockId)
|
||||
|
||||
const selectedToolConfig = mcpTools.find((tool) => tool.id === selectedTool)
|
||||
const toolSchema = selectedToolConfig?.inputSchema
|
||||
@@ -180,7 +183,7 @@ export function McpDynamicArgs({
|
||||
|
||||
case 'long-input':
|
||||
return (
|
||||
<div key={`${paramName}-long`}>
|
||||
<div key={`${paramName}-long`} className='relative'>
|
||||
<Textarea
|
||||
value={value || ''}
|
||||
onChange={(e) => updateParameter(paramName, e.target.value, paramSchema)}
|
||||
@@ -192,8 +195,14 @@ export function McpDynamicArgs({
|
||||
}
|
||||
disabled={disabled}
|
||||
rows={4}
|
||||
className='min-h-[80px] resize-none'
|
||||
className='min-h-[80px] resize-none text-transparent caret-foreground'
|
||||
/>
|
||||
<div className='pointer-events-none absolute inset-0 overflow-auto whitespace-pre-wrap break-words p-3 text-sm'>
|
||||
{formatDisplayText(value || '', {
|
||||
accessiblePrefixes,
|
||||
highlightAll: !accessiblePrefixes,
|
||||
})}
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
|
||||
@@ -203,9 +212,10 @@ export function McpDynamicArgs({
|
||||
paramName.toLowerCase().includes('password') ||
|
||||
paramName.toLowerCase().includes('token')
|
||||
const isNumeric = paramSchema.type === 'number' || paramSchema.type === 'integer'
|
||||
const isTextInput = !isPassword && !isNumeric
|
||||
|
||||
return (
|
||||
<div key={`${paramName}-short`}>
|
||||
<div key={`${paramName}-short`} className={isTextInput ? 'relative' : ''}>
|
||||
<Input
|
||||
type={isPassword ? 'password' : isNumeric ? 'number' : 'text'}
|
||||
value={value || ''}
|
||||
@@ -231,7 +241,18 @@ export function McpDynamicArgs({
|
||||
`Enter ${formatParameterLabel(paramName).toLowerCase()}`
|
||||
}
|
||||
disabled={disabled}
|
||||
className={isTextInput ? 'text-transparent caret-foreground' : ''}
|
||||
/>
|
||||
{isTextInput && (
|
||||
<div className='pointer-events-none absolute inset-0 flex items-center overflow-hidden bg-transparent px-3 text-sm'>
|
||||
<div className='whitespace-pre'>
|
||||
{formatDisplayText(value?.toString() || '', {
|
||||
accessiblePrefixes,
|
||||
highlightAll: !accessiblePrefixes,
|
||||
})}
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
@@ -3,7 +3,7 @@ import { ResponseFormat as SharedResponseFormat } from '@/app/workspace/[workspa
|
||||
export interface JSONProperty {
|
||||
id: string
|
||||
key: string
|
||||
type: 'string' | 'number' | 'boolean' | 'object' | 'array'
|
||||
type: 'string' | 'number' | 'boolean' | 'object' | 'array' | 'file'
|
||||
value?: any
|
||||
collapsed?: boolean
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
import { useEffect, useRef, useState } from 'react'
import { ChevronDown, Plus, Trash } from 'lucide-react'
import { ChevronDown, Paperclip, Plus, Trash } from 'lucide-react'
import { Badge } from '@/components/ui/badge'
import { Button } from '@/components/ui/button'
import {
@@ -27,7 +27,7 @@ import { useAccessibleReferencePrefixes } from '@/app/workspace/[workspaceId]/w/
interface Field {
id: string
name: string
type?: 'string' | 'number' | 'boolean' | 'object' | 'array'
type?: 'string' | 'number' | 'boolean' | 'object' | 'array' | 'files'
value?: string
collapsed?: boolean
}
@@ -339,37 +339,46 @@ export function FieldFormat({
onClick={() => updateField(field.id, 'type', 'string')}
className='cursor-pointer'
>
<span className='mr-2 font-mono'>Aa</span>
<span className='mr-2 w-6 text-center font-mono'>Aa</span>
<span>String</span>
</DropdownMenuItem>
<DropdownMenuItem
onClick={() => updateField(field.id, 'type', 'number')}
className='cursor-pointer'
>
<span className='mr-2 font-mono'>123</span>
<span className='mr-2 w-6 text-center font-mono'>123</span>
<span>Number</span>
</DropdownMenuItem>
<DropdownMenuItem
onClick={() => updateField(field.id, 'type', 'boolean')}
className='cursor-pointer'
>
<span className='mr-2 font-mono'>0/1</span>
<span className='mr-2 w-6 text-center font-mono'>0/1</span>
<span>Boolean</span>
</DropdownMenuItem>
<DropdownMenuItem
onClick={() => updateField(field.id, 'type', 'object')}
className='cursor-pointer'
>
<span className='mr-2 font-mono'>{'{}'}</span>
<span className='mr-2 w-6 text-center font-mono'>{'{}'}</span>
<span>Object</span>
</DropdownMenuItem>
<DropdownMenuItem
onClick={() => updateField(field.id, 'type', 'array')}
className='cursor-pointer'
>
<span className='mr-2 font-mono'>[]</span>
<span className='mr-2 w-6 text-center font-mono'>[]</span>
<span>Array</span>
</DropdownMenuItem>
<DropdownMenuItem
onClick={() => updateField(field.id, 'type', 'files')}
className='cursor-pointer'
>
<div className='mr-2 flex w-6 justify-center'>
<Paperclip className='h-4 w-4' />
</div>
<span>Files</span>
</DropdownMenuItem>
</DropdownMenuContent>
</DropdownMenu>
</div>

@@ -8,6 +8,7 @@ import { Input } from '@/components/ui/input'
import { checkTagTrigger, TagDropdown } from '@/components/ui/tag-dropdown'
import { cn } from '@/lib/utils'
import { useSubBlockValue } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/workflow-block/components/sub-block/hooks/use-sub-block-value'
import { useAccessibleReferencePrefixes } from '@/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-accessible-reference-prefixes'

interface TableProps {
blockId: string
@@ -34,6 +35,7 @@ export function Table({
const params = useParams()
const workspaceId = params.workspaceId as string
const [storeValue, setStoreValue] = useSubBlockValue<TableRow[]>(blockId, subBlockId)
const accessiblePrefixes = useAccessibleReferencePrefixes(blockId)

// Use preview value when in preview mode, otherwise use store value
const value = isPreview ? previewValue : storeValue
@@ -240,7 +242,12 @@ export function Table({
data-overlay={cellKey}
className='pointer-events-none absolute inset-0 flex items-center overflow-hidden bg-transparent px-3 text-sm'
>
<div className='whitespace-pre'>{formatDisplayText(cellValue)}</div>
<div className='whitespace-pre'>
{formatDisplayText(cellValue, {
accessiblePrefixes,
highlightAll: !accessiblePrefixes,
})}
</div>
</div>
</div>
</td>
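Several hunks in this batch change bare `formatDisplayText(value)` calls to pass an options object built from `useAccessibleReferencePrefixes(blockId)`. The helper's real signature is not shown in this diff; the sketch below only records the call shape the hunks rely on, with a hypothetical `FormatOptions` type and an assumed prefix type.

```ts
import type { ReactNode } from 'react'

// Hypothetical option shape inferred from the call sites in these hunks; the actual
// formatDisplayText implementation is not part of this diff.
interface FormatOptions {
  // Reference prefixes (e.g. names of upstream blocks) that are allowed to highlight.
  // The concrete type returned by useAccessibleReferencePrefixes is assumed here.
  accessiblePrefixes?: Set<string>
  // When prefix information is unavailable, highlight every reference rather than none.
  highlightAll?: boolean
}

declare function formatDisplayText(text: string, options?: FormatOptions): ReactNode

// Call shape used by the updated table cells and inputs:
// formatDisplayText(cellValue, { accessiblePrefixes, highlightAll: !accessiblePrefixes })
```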
@@ -10,6 +10,7 @@ import {
CommandItem,
CommandList,
} from '@/components/ui/command'
import { formatDisplayText } from '@/components/ui/formatted-text'
import { Input } from '@/components/ui/input'
import { Label } from '@/components/ui/label'
import { Popover, PopoverContent, PopoverTrigger } from '@/components/ui/popover'
@@ -23,9 +24,11 @@ import {
import { Switch } from '@/components/ui/switch'
import { Tooltip, TooltipContent, TooltipProvider, TooltipTrigger } from '@/components/ui/tooltip'
import { cn } from '@/lib/utils'
import { useAccessibleReferencePrefixes } from '@/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-accessible-reference-prefixes'
import type { TriggerConfig } from '@/triggers/types'

interface TriggerConfigSectionProps {
blockId: string
triggerDef: TriggerConfig
config: Record<string, any>
onChange: (fieldId: string, value: any) => void
@@ -34,6 +37,7 @@ interface TriggerConfigSectionProps {
}

export function TriggerConfigSection({
blockId,
triggerDef,
config,
onChange,
@@ -42,6 +46,7 @@ export function TriggerConfigSection({
}: TriggerConfigSectionProps) {
const [showSecrets, setShowSecrets] = useState<Record<string, boolean>>({})
const [copied, setCopied] = useState<string | null>(null)
const accessiblePrefixes = useAccessibleReferencePrefixes(blockId)

const copyToClipboard = (text: string, type: string) => {
navigator.clipboard.writeText(text)
@@ -258,9 +263,20 @@ export function TriggerConfigSection({
className={cn(
'h-9 rounded-[8px]',
isSecret ? 'pr-32' : '',
'focus-visible:ring-2 focus-visible:ring-primary/20'
'focus-visible:ring-2 focus-visible:ring-primary/20',
!isSecret && 'text-transparent caret-foreground'
)}
/>
{!isSecret && (
<div className='pointer-events-none absolute inset-0 flex items-center overflow-hidden bg-transparent px-3 text-sm'>
<div className='whitespace-pre'>
{formatDisplayText(value?.toString() || '', {
accessiblePrefixes,
highlightAll: !accessiblePrefixes,
})}
</div>
</div>
)}
{isSecret && (
<div className='absolute top-0.5 right-0.5 flex h-8 items-center gap-1 pr-1'>
<Button

@@ -467,6 +467,7 @@ export function TriggerModal({
)}

<TriggerConfigSection
blockId={blockId}
triggerDef={triggerDef}
config={config}
onChange={handleConfigChange}

@@ -203,8 +203,14 @@ export function useWand({

for (const line of lines) {
if (line.startsWith('data: ')) {
const lineData = line.substring(6)

if (lineData === '[DONE]') {
continue
}

try {
const data = JSON.parse(line.substring(6))
const data = JSON.parse(lineData)

if (data.error) {
throw new Error(data.error)

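This hunk makes the wand's SSE reader skip the terminal `[DONE]` sentinel before JSON-parsing each event. For context, a self-contained sketch of the same parsing loop is below; the endpoint and the `handleEvent` callback are placeholders, not code from the repository.

```ts
// Minimal SSE-over-fetch reader with the same `[DONE]` guard as the hunk above.
// The URL and handleEvent callback are illustrative placeholders.
async function readSSE(url: string, handleEvent: (data: unknown) => void): Promise<void> {
  const response = await fetch(url)
  if (!response.body) return

  const reader = response.body.getReader()
  const decoder = new TextDecoder()
  let buffer = ''

  while (true) {
    const { done, value } = await reader.read()
    if (done) break

    buffer += decoder.decode(value, { stream: true })
    const lines = buffer.split('\n')
    buffer = lines.pop() ?? '' // keep any partial trailing line for the next chunk

    for (const line of lines) {
      if (!line.startsWith('data: ')) continue
      const lineData = line.substring(6)
      if (lineData === '[DONE]') continue // sentinel frame, not JSON

      try {
        const data = JSON.parse(lineData) as { error?: string }
        if (data.error) {
          console.error('Stream reported an error:', data.error)
          continue
        }
        handleEvent(data)
      } catch {
        console.error('Skipping malformed SSE line:', lineData)
      }
    }
  }
}
```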
@@ -30,9 +30,10 @@ interface ExecutorOptions {
workflowVariables?: Record<string, any>
contextExtensions?: {
stream?: boolean
selectedOutputIds?: string[]
selectedOutputs?: string[]
edges?: Array<{ source: string; target: string }>
onStream?: (streamingExecution: StreamingExecution) => Promise<void>
onBlockComplete?: (blockId: string, output: any) => Promise<void>
executionId?: string
workspaceId?: string
}
@@ -44,6 +45,56 @@ interface DebugValidationResult {
error?: string
}

const WORKFLOW_EXECUTION_FAILURE_MESSAGE = 'Workflow execution failed'

function isRecord(value: unknown): value is Record<string, unknown> {
return typeof value === 'object' && value !== null
}

function sanitizeMessage(value: unknown): string | undefined {
if (typeof value !== 'string') return undefined
const trimmed = value.trim()
if (!trimmed || trimmed === 'undefined (undefined)') return undefined
return trimmed
}

function normalizeErrorMessage(error: unknown): string {
if (error instanceof Error) {
const message = sanitizeMessage(error.message)
if (message) return message
} else if (typeof error === 'string') {
const message = sanitizeMessage(error)
if (message) return message
}

if (isRecord(error)) {
const directMessage = sanitizeMessage(error.message)
if (directMessage) return directMessage

const nestedError = error.error
if (isRecord(nestedError)) {
const nestedMessage = sanitizeMessage(nestedError.message)
if (nestedMessage) return nestedMessage
} else {
const nestedMessage = sanitizeMessage(nestedError)
if (nestedMessage) return nestedMessage
}
}

return WORKFLOW_EXECUTION_FAILURE_MESSAGE
}

function isExecutionResult(value: unknown): value is ExecutionResult {
if (!isRecord(value)) return false
return typeof value.success === 'boolean' && isRecord(value.output)
}

function extractExecutionResult(error: unknown): ExecutionResult | null {
if (!isRecord(error)) return null
const candidate = error.executionResult
return isExecutionResult(candidate) ? candidate : null
}

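These helpers replace the ad-hoc error-message plumbing further down in this file. A few illustrative calls (the inputs are invented for demonstration, not taken from the diff) show how they behave:

```ts
// Hypothetical inputs, chosen only to illustrate the helpers defined above.
normalizeErrorMessage(new Error('Fetch failed'))              // 'Fetch failed'
normalizeErrorMessage('undefined (undefined)')                // 'Workflow execution failed' (sanitized away)
normalizeErrorMessage({ error: { message: 'Bad request' } })  // 'Bad request'

// extractExecutionResult only returns the attached value when it looks like an ExecutionResult.
extractExecutionResult({ executionResult: { success: false, output: {}, logs: [] } })
// -> { success: false, output: {}, logs: [] }
extractExecutionResult(new Error('no result attached'))       // -> null
```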
export function useWorkflowExecution() {
const currentWorkflow = useCurrentWorkflow()
const { activeWorkflowId, workflows } = useWorkflowRegistry()
@@ -273,7 +324,7 @@ export function useWorkflowExecution() {
if (isChatExecution) {
const stream = new ReadableStream({
async start(controller) {
const encoder = new TextEncoder()
const { encodeSSE } = await import('@/lib/utils')
const executionId = uuidv4()
const streamedContent = new Map<string, string>()
const streamReadingPromises: Promise<void>[] = []
@@ -360,6 +411,8 @@ export function useWorkflowExecution() {
if (!streamingExecution.stream) return
const reader = streamingExecution.stream.getReader()
const blockId = (streamingExecution.execution as any)?.blockId
let isFirstChunk = true

if (blockId) {
streamedContent.set(blockId, '')
}
@@ -373,14 +426,17 @@ export function useWorkflowExecution() {
if (blockId) {
streamedContent.set(blockId, (streamedContent.get(blockId) || '') + chunk)
}
controller.enqueue(
encoder.encode(
`data: ${JSON.stringify({
blockId,
chunk,
})}\n\n`
)
)

// Add separator before first chunk if this isn't the first block
let chunkToSend = chunk
if (isFirstChunk && streamedContent.size > 1) {
chunkToSend = `\n\n${chunk}`
isFirstChunk = false
} else if (isFirstChunk) {
isFirstChunk = false
}

controller.enqueue(encodeSSE({ blockId, chunk: chunkToSend }))
}
} catch (error) {
logger.error('Error reading from stream:', error)
@@ -390,8 +446,58 @@ export function useWorkflowExecution() {
streamReadingPromises.push(promise)
}

// Handle non-streaming blocks (like Function blocks)
const onBlockComplete = async (blockId: string, output: any) => {
// Get selected outputs from chat store
const chatStore = await import('@/stores/panel/chat/store').then(
(mod) => mod.useChatStore
)
const selectedOutputs = chatStore
.getState()
.getSelectedWorkflowOutput(activeWorkflowId)

if (!selectedOutputs?.length) return

const { extractBlockIdFromOutputId, extractPathFromOutputId, traverseObjectPath } =
await import('@/lib/response-format')

// Check if this block's output is selected
const matchingOutputs = selectedOutputs.filter(
(outputId) => extractBlockIdFromOutputId(outputId) === blockId
)

if (!matchingOutputs.length) return

// Process each selected output from this block
for (const outputId of matchingOutputs) {
const path = extractPathFromOutputId(outputId, blockId)
const outputValue = traverseObjectPath(output, path)

if (outputValue !== undefined) {
const formattedOutput =
typeof outputValue === 'string'
? outputValue
: JSON.stringify(outputValue, null, 2)

// Add separator if this isn't the first output
const separator = streamedContent.size > 0 ? '\n\n' : ''

// Send the non-streaming block output as a chunk
controller.enqueue(encodeSSE({ blockId, chunk: separator + formattedOutput }))

// Track that we've sent output for this block
streamedContent.set(blockId, formattedOutput)
}
}
}

try {
const result = await executeWorkflow(workflowInput, onStream, executionId)
const result = await executeWorkflow(
workflowInput,
onStream,
executionId,
onBlockComplete
)

// Check if execution was cancelled
if (
@@ -400,11 +506,7 @@ export function useWorkflowExecution() {
!result.success &&
result.error === 'Workflow execution was cancelled'
) {
controller.enqueue(
encoder.encode(
`data: ${JSON.stringify({ event: 'cancelled', data: result })}\n\n`
)
)
controller.enqueue(encodeSSE({ event: 'cancelled', data: result }))
return
}

@@ -439,9 +541,8 @@ export function useWorkflowExecution() {
logger.info(`Processed ${processedCount} blocks for streaming tokenization`)
}

controller.enqueue(
encoder.encode(`data: ${JSON.stringify({ event: 'final', data: result })}\n\n`)
)
const { encodeSSE } = await import('@/lib/utils')
controller.enqueue(encodeSSE({ event: 'final', data: result }))
persistLogs(executionId, result).catch((err) =>
logger.error('Error persisting logs:', err)
)
@@ -461,9 +562,8 @@ export function useWorkflowExecution() {
}

// Send the error as final event so downstream handlers can treat it uniformly
controller.enqueue(
encoder.encode(`data: ${JSON.stringify({ event: 'final', data: errorResult })}\n\n`)
)
const { encodeSSE } = await import('@/lib/utils')
controller.enqueue(encodeSSE({ event: 'final', data: errorResult }))

// Persist the error to logs so it shows up in the logs page
persistLogs(executionId, errorResult).catch((err) =>
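The hunks above swap hand-rolled `encoder.encode(...)` SSE frames for an `encodeSSE` helper imported from `@/lib/utils`. Its implementation is not shown in this diff; the sketch below is only a plausible shape inferred from the inline code it replaces.

```ts
// Plausible sketch of the encodeSSE helper imported above; not the repository's actual code.
const sseEncoder = new TextEncoder()

export function encodeSSE(payload: unknown): Uint8Array {
  // One server-sent event frame: "data: <json>\n\n".
  return sseEncoder.encode(`data: ${JSON.stringify(payload)}\n\n`)
}

// The two forms are then equivalent:
// encoder.encode(`data: ${JSON.stringify({ event: 'final', data: result })}\n\n`)
// controller.enqueue(encodeSSE({ event: 'final', data: result }))
```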
@@ -539,7 +639,8 @@ export function useWorkflowExecution() {
const executeWorkflow = async (
workflowInput?: any,
onStream?: (se: StreamingExecution) => Promise<void>,
executionId?: string
executionId?: string,
onBlockComplete?: (blockId: string, output: any) => Promise<void>
): Promise<ExecutionResult | StreamingExecution> => {
// Use currentWorkflow but check if we're in diff mode
const { blocks: workflowBlocks, edges: workflowEdges } = currentWorkflow
@@ -664,11 +765,11 @@ export function useWorkflowExecution() {
)

// If this is a chat execution, get the selected outputs
let selectedOutputIds: string[] | undefined
let selectedOutputs: string[] | undefined
if (isExecutingFromChat && activeWorkflowId) {
// Get selected outputs from chat store
const chatStore = await import('@/stores/panel/chat/store').then((mod) => mod.useChatStore)
selectedOutputIds = chatStore.getState().getSelectedWorkflowOutput(activeWorkflowId)
selectedOutputs = chatStore.getState().getSelectedWorkflowOutput(activeWorkflowId)
}

// Helper to extract test values from inputFormat subblock
@@ -843,12 +944,13 @@ export function useWorkflowExecution() {
workflowVariables,
contextExtensions: {
stream: isExecutingFromChat,
selectedOutputIds,
selectedOutputs,
edges: workflow.connections.map((conn) => ({
source: conn.source,
target: conn.target,
})),
onStream,
onBlockComplete,
executionId,
workspaceId,
},
@@ -862,74 +964,56 @@ export function useWorkflowExecution() {
return newExecutor.execute(activeWorkflowId || '', startBlockId)
}

const handleExecutionError = (error: any, options?: { executionId?: string }) => {
let errorMessage = 'Unknown error'
if (error instanceof Error) {
errorMessage = error.message || `Error: ${String(error)}`
} else if (typeof error === 'string') {
errorMessage = error
} else if (error && typeof error === 'object') {
if (
error.message === 'undefined (undefined)' ||
(error.error &&
typeof error.error === 'object' &&
error.error.message === 'undefined (undefined)')
) {
errorMessage = 'API request failed - no specific error details available'
} else if (error.message) {
errorMessage = error.message
} else if (error.error && typeof error.error === 'string') {
errorMessage = error.error
} else if (error.error && typeof error.error === 'object' && error.error.message) {
errorMessage = error.error.message
} else {
try {
errorMessage = `Error details: ${JSON.stringify(error)}`
} catch {
errorMessage = 'Error occurred but details could not be displayed'
}
const handleExecutionError = (error: unknown, options?: { executionId?: string }) => {
const normalizedMessage = normalizeErrorMessage(error)
const executionResultFromError = extractExecutionResult(error)

let errorResult: ExecutionResult

if (executionResultFromError) {
const logs = Array.isArray(executionResultFromError.logs) ? executionResultFromError.logs : []

errorResult = {
...executionResultFromError,
success: false,
error: executionResultFromError.error ?? normalizedMessage,
logs,
}
}
} else {
if (!executor) {
try {
let blockId = 'serialization'
let blockName = 'Workflow'
let blockType = 'serializer'
if (error instanceof WorkflowValidationError) {
blockId = error.blockId || blockId
blockName = error.blockName || blockName
blockType = error.blockType || blockType
}

if (errorMessage === 'undefined (undefined)') {
errorMessage = 'API request failed - no specific error details available'
}
useConsoleStore.getState().addConsole({
input: {},
output: {},
success: false,
error: normalizedMessage,
durationMs: 0,
startedAt: new Date().toISOString(),
endedAt: new Date().toISOString(),
workflowId: activeWorkflowId || '',
blockId,
executionId: options?.executionId,
blockName,
blockType,
})
} catch {}
}

// If we failed before creating an executor (e.g., serializer validation), add a console entry
if (!executor) {
try {
// Prefer attributing to specific subflow if we have a structured error
let blockId = 'serialization'
let blockName = 'Workflow'
let blockType = 'serializer'
if (error instanceof WorkflowValidationError) {
blockId = error.blockId || blockId
blockName = error.blockName || blockName
blockType = error.blockType || blockType
}

useConsoleStore.getState().addConsole({
input: {},
output: {},
success: false,
error: errorMessage,
durationMs: 0,
startedAt: new Date().toISOString(),
endedAt: new Date().toISOString(),
workflowId: activeWorkflowId || '',
blockId,
executionId: options?.executionId,
blockName,
blockType,
})
} catch {}
}

const errorResult: ExecutionResult = {
success: false,
output: {},
error: errorMessage,
logs: [],
errorResult = {
success: false,
output: {},
error: normalizedMessage,
logs: [],
}
}

setExecutionResult(errorResult)
@@ -937,16 +1021,14 @@ export function useWorkflowExecution() {
setIsDebugging(false)
setActiveBlocks(new Set())

let notificationMessage = 'Workflow execution failed'
if (error?.request?.url) {
if (error.request.url && error.request.url.trim() !== '') {
notificationMessage += `: Request to ${error.request.url} failed`
if (error.status) {
notificationMessage += ` (Status: ${error.status})`
}
let notificationMessage = WORKFLOW_EXECUTION_FAILURE_MESSAGE
if (isRecord(error) && isRecord(error.request) && sanitizeMessage(error.request.url)) {
notificationMessage += `: Request to ${(error.request.url as string).trim()} failed`
if ('status' in error && typeof error.status === 'number') {
notificationMessage += ` (Status: ${error.status})`
}
} else {
notificationMessage += `: ${errorMessage}`
} else if (sanitizeMessage(errorResult.error)) {
notificationMessage += `: ${errorResult.error}`
}

return errorResult

@@ -30,7 +30,7 @@ interface ExecutorOptions {
workflowVariables?: Record<string, any>
contextExtensions?: {
stream?: boolean
selectedOutputIds?: string[]
selectedOutputs?: string[]
edges?: Array<{ source: string; target: string }>
onStream?: (streamingExecution: StreamingExecution) => Promise<void>
executionId?: string
@@ -181,11 +181,11 @@ export async function executeWorkflowWithLogging(
)

// If this is a chat execution, get the selected outputs
let selectedOutputIds: string[] | undefined
let selectedOutputs: string[] | undefined
if (isExecutingFromChat) {
// Get selected outputs from chat store
const chatStore = await import('@/stores/panel/chat/store').then((mod) => mod.useChatStore)
selectedOutputIds = chatStore.getState().getSelectedWorkflowOutput(activeWorkflowId)
selectedOutputs = chatStore.getState().getSelectedWorkflowOutput(activeWorkflowId)
}

// Create executor options
@@ -197,7 +197,7 @@ export async function executeWorkflowWithLogging(
workflowVariables,
contextExtensions: {
stream: isExecutingFromChat,
selectedOutputIds,
selectedOutputs,
edges: workflow.connections.map((conn) => ({
source: conn.source,
target: conn.target,

@@ -1422,7 +1422,7 @@ const WorkflowContent = React.memo(() => {
setDraggedNodeId(node.id)

// Emit collaborative position update during drag for smooth real-time movement
collaborativeUpdateBlockPosition(node.id, node.position)
collaborativeUpdateBlockPosition(node.id, node.position, false)

// Get the current parent ID of the node being dragged
const currentParentId = blocks[node.id]?.data?.parentId || null
@@ -1608,7 +1608,7 @@ const WorkflowContent = React.memo(() => {

// Emit collaborative position update for the final position
// This ensures other users see the smooth final position
collaborativeUpdateBlockPosition(node.id, node.position)
collaborativeUpdateBlockPosition(node.id, node.position, true)

// Record single move entry on drag end to avoid micro-moves
try {

@@ -510,7 +510,7 @@ export function ApiKeys({ onOpenChange, registerCloseHandler }: ApiKeysProps) {
setKeyType('personal')
if (createError) setCreateError(null)
}}
className='h-8'
className='h-8 data-[variant=outline]:border-border data-[variant=outline]:bg-background data-[variant=outline]:text-foreground data-[variant=outline]:hover:bg-muted dark:data-[variant=outline]:border-border dark:data-[variant=outline]:bg-background dark:data-[variant=outline]:text-foreground dark:data-[variant=outline]:hover:bg-muted/80'
>
Personal
</Button>
@@ -522,7 +522,7 @@ export function ApiKeys({ onOpenChange, registerCloseHandler }: ApiKeysProps) {
setKeyType('workspace')
if (createError) setCreateError(null)
}}
className='h-8'
className='h-8 data-[variant=outline]:border-border data-[variant=outline]:bg-background data-[variant=outline]:text-foreground data-[variant=outline]:hover:bg-muted dark:data-[variant=outline]:border-border dark:data-[variant=outline]:bg-background dark:data-[variant=outline]:text-foreground dark:data-[variant=outline]:hover:bg-muted/80'
>
Workspace
</Button>
@@ -549,7 +549,7 @@ export function ApiKeys({ onOpenChange, registerCloseHandler }: ApiKeysProps) {

<AlertDialogFooter className='flex'>
<AlertDialogCancel
className='h-9 w-full rounded-[8px]'
className='h-9 w-full rounded-[8px] border-border bg-background text-foreground hover:bg-muted dark:border-border dark:bg-background dark:text-foreground dark:hover:bg-muted/80'
onClick={() => {
setNewKeyName('')
setKeyType('personal')

@@ -32,6 +32,7 @@ interface WorkflowPreviewProps {
isPannable?: boolean
defaultPosition?: { x: number; y: number }
defaultZoom?: number
fitPadding?: number
onNodeClick?: (blockId: string, mousePosition: { x: number; y: number }) => void
}

@@ -54,7 +55,8 @@ export function WorkflowPreview({
width = '100%',
isPannable = false,
defaultPosition,
defaultZoom,
defaultZoom = 0.8,
fitPadding = 0.25,
onNodeClick,
}: WorkflowPreviewProps) {
// Check if the workflow state is valid
@@ -274,6 +276,7 @@ export function WorkflowPreview({
edgeTypes={edgeTypes}
connectionLineType={ConnectionLineType.SmoothStep}
fitView
fitViewOptions={{ padding: fitPadding }}
panOnScroll={false}
panOnDrag={isPannable}
zoomOnScroll={false}
@@ -298,7 +301,12 @@ export function WorkflowPreview({
: undefined
}
>
<Background />
<Background
color='hsl(var(--workflow-dots))'
size={4}
gap={40}
style={{ backgroundColor: 'hsl(var(--workflow-background))' }}
/>
</ReactFlow>
</div>
</ReactFlowProvider>

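With the new fitPadding prop and the 0.8 default zoom, a call site only needs to opt in where it wants different padding. The usage below is hypothetical: it uses only the props visible in this diff, omits whatever other props WorkflowPreview requires, and invents the handleNodeClick handler for illustration.

```tsx
// Hypothetical usage of the props touched by this diff; other required props are omitted
// and handleNodeClick is an invented handler.
function PreviewPane() {
  const handleNodeClick = (blockId: string, mousePosition: { x: number; y: number }) => {
    console.log('clicked block', blockId, 'at', mousePosition)
  }

  return (
    <WorkflowPreview
      width='100%'
      isPannable
      defaultZoom={0.8}
      fitPadding={0.25}
      onNodeClick={handleNodeClick}
    />
  )
}
```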
@@ -26,7 +26,7 @@ export type DocumentProcessingPayload = {

export const processDocument = task({
id: 'knowledge-process-document',
maxDuration: env.KB_CONFIG_MAX_DURATION || 300,
maxDuration: env.KB_CONFIG_MAX_DURATION || 600,
retry: {
maxAttempts: env.KB_CONFIG_MAX_ATTEMPTS || 3,
factor: env.KB_CONFIG_RETRY_FACTOR || 2,

@@ -17,6 +17,7 @@ import {
} from '@/lib/workflows/db-helpers'
import { updateWorkflowRunCounts } from '@/lib/workflows/utils'
import { Executor } from '@/executor'
import type { ExecutionResult } from '@/executor/types'
import { Serializer } from '@/serializer'
import { mergeSubblockState } from '@/stores/workflows/server-utils'

@@ -386,6 +387,13 @@ async function executeWebhookJobInternal(

// Complete logging session with error (matching workflow-execution pattern)
try {
const executionResult = (error?.executionResult as ExecutionResult | undefined) || {
success: false,
output: {},
logs: [],
}
const { traceSpans } = buildTraceSpans(executionResult)

await loggingSession.safeCompleteWithError({
endedAt: new Date().toISOString(),
totalDurationMs: 0,
@@ -393,6 +401,7 @@ async function executeWebhookJobInternal(
message: error.message || 'Webhook execution failed',
stackTrace: error.stack,
},
traceSpans,
})
} catch (loggingError) {
logger.error(`[${requestId}] Failed to complete logging session`, loggingError)
Some files were not shown because too many files have changed in this diff.