+ );
+}
+```
+
## Getting Your API Key
@@ -578,7 +976,7 @@ import {
// Type-safe client initialization
const client: SimStudioClient = new SimStudioClient({
- apiKey: process.env.SIMSTUDIO_API_KEY!
+ apiKey: process.env.SIM_API_KEY!
});
// Type-safe workflow execution
diff --git a/apps/docs/content/docs/en/triggers/api.mdx b/apps/docs/content/docs/en/triggers/api.mdx
index 98dad0869..bc1604701 100644
--- a/apps/docs/content/docs/en/triggers/api.mdx
+++ b/apps/docs/content/docs/en/triggers/api.mdx
@@ -38,6 +38,84 @@ curl -X POST \
Successful responses return the serialized execution result from the Executor. Errors surface validation, auth, or workflow failures.
+## Streaming Responses
+
+Enable real-time streaming to receive workflow output as it is generated, character by character. This is useful for displaying AI responses to users progressively.
+
+### Request Parameters
+
+Add these parameters to enable streaming:
+
+- `stream` - Set to `true` to enable Server-Sent Events (SSE) streaming
+- `selectedOutputs` - Array of block outputs to stream (e.g., `["agent1.content"]`)
+
+### Block Output Format
+
+Use the `blockName.attribute` format to specify which block outputs to stream:
+- Format: `"blockName.attribute"` (e.g., to stream the content of the Agent 1 block, use `"agent1.content"`)
+- Block names are case-insensitive and spaces are ignored
+
+### Example Request
+
+```bash
+curl -X POST \
+ https://sim.ai/api/workflows/WORKFLOW_ID/execute \
+ -H 'Content-Type: application/json' \
+ -H 'X-API-Key: YOUR_KEY' \
+ -d '{
+ "message": "Count to five",
+ "stream": true,
+ "selectedOutputs": ["agent1.content"]
+ }'
+```
+
+### Response Format
+
+Streaming responses use the Server-Sent Events (SSE) format:
+
+```
+data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":"One"}
+
+data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", two"}
+
+data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", three"}
+
+data: {"event":"done","success":true,"output":{},"metadata":{"duration":610}}
+
+data: [DONE]
+```
+
+Each event includes:
+- **Streaming chunks**: `{"blockId": "...", "chunk": "text"}` - Real-time text as it's generated
+- **Final event**: `{"event": "done", ...}` - Execution metadata and complete results
+- **Terminator**: `[DONE]` - Signals end of stream
+
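+A minimal consumption sketch in TypeScript, assuming a Node 18+ runtime with the built-in `fetch`; `WORKFLOW_ID`, the `SIM_API_KEY` environment variable, and the `agent1.content` output are placeholders for your own values:
+
+```typescript
+// Sketch: call the execute endpoint with stream: true and print chunks as they arrive.
+async function streamWorkflow(message: string): Promise<void> {
+  const response = await fetch('https://sim.ai/api/workflows/WORKFLOW_ID/execute', {
+    method: 'POST',
+    headers: {
+      'Content-Type': 'application/json',
+      'X-API-Key': process.env.SIM_API_KEY!,
+    },
+    body: JSON.stringify({
+      message,
+      stream: true,
+      selectedOutputs: ['agent1.content'],
+    }),
+  })
+
+  const reader = response.body!.getReader()
+  const decoder = new TextDecoder()
+  let buffer = ''
+
+  while (true) {
+    const { done, value } = await reader.read()
+    if (done) break
+    buffer += decoder.decode(value, { stream: true })
+
+    // SSE events are separated by blank lines; keep any partial event for the next read.
+    const events = buffer.split('\n\n')
+    buffer = events.pop() ?? ''
+
+    for (const event of events) {
+      const data = event.replace(/^data: /, '').trim()
+      if (!data || data === '[DONE]') continue
+      const parsed = JSON.parse(data)
+      if (parsed.chunk) process.stdout.write(parsed.chunk) // streamed text
+      if (parsed.event === 'done') console.log('\nmetadata:', parsed.metadata)
+    }
+  }
+}
+```
+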
+### Multiple Block Streaming
+
+When `selectedOutputs` includes multiple blocks, each chunk indicates which block produced it:
+
+```bash
+curl -X POST \
+ https://sim.ai/api/workflows/WORKFLOW_ID/execute \
+ -H 'Content-Type: application/json' \
+ -H 'X-API-Key: YOUR_KEY' \
+ -d '{
+ "message": "Process this request",
+ "stream": true,
+ "selectedOutputs": ["agent1.content", "agent2.content"]
+ }'
+```
+
+The `blockId` field in each chunk lets you route output to the correct UI element:
+
+```
+data: {"blockId":"agent1-uuid","chunk":"Processing..."}
+
+data: {"blockId":"agent2-uuid","chunk":"Analyzing..."}
+
+data: {"blockId":"agent1-uuid","chunk":" complete"}
+```
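+
+Building on the parser above, a small sketch of accumulating chunks per block so each one can drive its own UI element; `outputsByBlock` and `handleChunk` are illustrative names, not part of the API:
+
+```typescript
+// Sketch: accumulate streamed text per block, keyed by blockId.
+const outputsByBlock = new Map<string, string>()
+
+function handleChunk(parsed: { blockId?: string; chunk?: string }): void {
+  if (!parsed.blockId || parsed.chunk === undefined) return
+  const existing = outputsByBlock.get(parsed.blockId) ?? ''
+  outputsByBlock.set(parsed.blockId, existing + parsed.chunk)
+  // Update the UI element bound to parsed.blockId here.
+}
+```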
+
## Output Reference
| Reference | Description |
diff --git a/apps/docs/content/docs/es/sdks/python.mdx b/apps/docs/content/docs/es/sdks/python.mdx
index 420300611..c915b0d11 100644
--- a/apps/docs/content/docs/es/sdks/python.mdx
+++ b/apps/docs/content/docs/es/sdks/python.mdx
@@ -214,7 +214,7 @@ class SimStudioError(Exception):
import os
from simstudio import SimStudioClient
-client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
+client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def run_workflow():
try:
@@ -252,7 +252,7 @@ Maneja diferentes tipos de errores que pueden ocurrir durante la ejecución del
from simstudio import SimStudioClient, SimStudioError
import os
-client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
+client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def execute_with_error_handling():
try:
@@ -284,7 +284,7 @@ from simstudio import SimStudioClient
import os
# Using context manager to automatically close the session
-with SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY")) as client:
+with SimStudioClient(api_key=os.getenv("SIM_API_KEY")) as client:
result = client.execute_workflow("workflow-id")
print("Result:", result)
# Session is automatically closed here
@@ -298,7 +298,7 @@ Ejecuta múltiples flujos de trabajo de manera eficiente:
from simstudio import SimStudioClient
import os
-client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
+client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def execute_workflows_batch(workflow_data_pairs):
"""Execute multiple workflows with different input data."""
@@ -352,8 +352,8 @@ Configura el cliente usando variables de entorno:
# Development configuration
client = SimStudioClient(
- api_key=os.getenv("SIMSTUDIO_API_KEY"),
- base_url=os.getenv("SIMSTUDIO_BASE_URL", "https://sim.ai")
+ api_key=os.getenv("SIM_API_KEY"),
+ base_url=os.getenv("SIM_BASE_URL", "https://sim.ai")
)
```
@@ -365,13 +365,13 @@ Configura el cliente usando variables de entorno:
from simstudio import SimStudioClient
# Production configuration with error handling
- api_key = os.getenv("SIMSTUDIO_API_KEY")
+ api_key = os.getenv("SIM_API_KEY")
if not api_key:
- raise ValueError("SIMSTUDIO_API_KEY environment variable is required")
+ raise ValueError("SIM_API_KEY environment variable is required")
client = SimStudioClient(
api_key=api_key,
- base_url=os.getenv("SIMSTUDIO_BASE_URL", "https://sim.ai")
+ base_url=os.getenv("SIM_BASE_URL", "https://sim.ai")
)
```
diff --git a/apps/docs/content/docs/es/sdks/typescript.mdx b/apps/docs/content/docs/es/sdks/typescript.mdx
index 410554d7b..91af81332 100644
--- a/apps/docs/content/docs/es/sdks/typescript.mdx
+++ b/apps/docs/content/docs/es/sdks/typescript.mdx
@@ -230,7 +230,7 @@ class SimStudioError extends Error {
import { SimStudioClient } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
- apiKey: process.env.SIMSTUDIO_API_KEY!
+ apiKey: process.env.SIM_API_KEY!
});
async function runWorkflow() {
@@ -271,7 +271,7 @@ Maneja diferentes tipos de errores que pueden ocurrir durante la ejecución del
import { SimStudioClient, SimStudioError } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
- apiKey: process.env.SIMSTUDIO_API_KEY!
+ apiKey: process.env.SIM_API_KEY!
});
async function executeWithErrorHandling() {
@@ -315,14 +315,14 @@ Configura el cliente usando variables de entorno:
import { SimStudioClient } from 'simstudio-ts-sdk';
// Development configuration
- const apiKey = process.env.SIMSTUDIO_API_KEY;
+ const apiKey = process.env.SIM_API_KEY;
if (!apiKey) {
- throw new Error('SIMSTUDIO_API_KEY environment variable is required');
+ throw new Error('SIM_API_KEY environment variable is required');
}
const client = new SimStudioClient({
apiKey,
- baseUrl: process.env.SIMSTUDIO_BASE_URL // optional
+ baseUrl: process.env.SIM_BASE_URL // optional
});
```
@@ -333,14 +333,14 @@ Configura el cliente usando variables de entorno:
import { SimStudioClient } from 'simstudio-ts-sdk';
// Production configuration with validation
- const apiKey = process.env.SIMSTUDIO_API_KEY;
+ const apiKey = process.env.SIM_API_KEY;
if (!apiKey) {
- throw new Error('SIMSTUDIO_API_KEY environment variable is required');
+ throw new Error('SIM_API_KEY environment variable is required');
}
const client = new SimStudioClient({
apiKey,
- baseUrl: process.env.SIMSTUDIO_BASE_URL || 'https://sim.ai'
+ baseUrl: process.env.SIM_BASE_URL || 'https://sim.ai'
});
```
@@ -357,7 +357,7 @@ import { SimStudioClient } from 'simstudio-ts-sdk';
const app = express();
const client = new SimStudioClient({
- apiKey: process.env.SIMSTUDIO_API_KEY!
+ apiKey: process.env.SIM_API_KEY!
});
app.use(express.json());
@@ -399,7 +399,7 @@ import { NextApiRequest, NextApiResponse } from 'next';
import { SimStudioClient } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
- apiKey: process.env.SIMSTUDIO_API_KEY!
+ apiKey: process.env.SIM_API_KEY!
});
export default async function handler(
@@ -476,7 +476,7 @@ import { useState, useCallback } from 'react';
import { SimStudioClient, WorkflowExecutionResult } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
- apiKey: process.env.NEXT_PUBLIC_SIMSTUDIO_API_KEY!
+ apiKey: process.env.NEXT_PUBLIC_SIM_API_KEY!
});
interface UseWorkflowResult {
@@ -588,7 +588,7 @@ import {
// Type-safe client initialization
const client: SimStudioClient = new SimStudioClient({
- apiKey: process.env.SIMSTUDIO_API_KEY!
+ apiKey: process.env.SIM_API_KEY!
});
// Type-safe workflow execution
diff --git a/apps/docs/content/docs/fr/sdks/python.mdx b/apps/docs/content/docs/fr/sdks/python.mdx
index 61b0fb5c6..e72bf1911 100644
--- a/apps/docs/content/docs/fr/sdks/python.mdx
+++ b/apps/docs/content/docs/fr/sdks/python.mdx
@@ -214,7 +214,7 @@ class SimStudioError(Exception):
import os
from simstudio import SimStudioClient
-client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
+client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def run_workflow():
try:
@@ -252,7 +252,7 @@ Gérez différents types d'erreurs qui peuvent survenir pendant l'exécution du
from simstudio import SimStudioClient, SimStudioError
import os
-client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
+client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def execute_with_error_handling():
try:
@@ -284,7 +284,7 @@ from simstudio import SimStudioClient
import os
# Using context manager to automatically close the session
-with SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY")) as client:
+with SimStudioClient(api_key=os.getenv("SIM_API_KEY")) as client:
result = client.execute_workflow("workflow-id")
print("Result:", result)
# Session is automatically closed here
@@ -298,7 +298,7 @@ Exécutez plusieurs flux de travail efficacement :
from simstudio import SimStudioClient
import os
-client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
+client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def execute_workflows_batch(workflow_data_pairs):
"""Execute multiple workflows with different input data."""
@@ -352,8 +352,8 @@ Configurez le client en utilisant des variables d'environnement :
# Development configuration
client = SimStudioClient(
- api_key=os.getenv("SIMSTUDIO_API_KEY"),
- base_url=os.getenv("SIMSTUDIO_BASE_URL", "https://sim.ai")
+ api_key=os.getenv("SIM_API_KEY"),
+ base_url=os.getenv("SIM_BASE_URL", "https://sim.ai")
)
```
@@ -365,13 +365,13 @@ Configurez le client en utilisant des variables d'environnement :
from simstudio import SimStudioClient
# Production configuration with error handling
- api_key = os.getenv("SIMSTUDIO_API_KEY")
+ api_key = os.getenv("SIM_API_KEY")
if not api_key:
- raise ValueError("SIMSTUDIO_API_KEY environment variable is required")
+ raise ValueError("SIM_API_KEY environment variable is required")
client = SimStudioClient(
api_key=api_key,
- base_url=os.getenv("SIMSTUDIO_BASE_URL", "https://sim.ai")
+ base_url=os.getenv("SIM_BASE_URL", "https://sim.ai")
)
```
diff --git a/apps/docs/content/docs/fr/sdks/typescript.mdx b/apps/docs/content/docs/fr/sdks/typescript.mdx
index b57bbd080..1dec62fbd 100644
--- a/apps/docs/content/docs/fr/sdks/typescript.mdx
+++ b/apps/docs/content/docs/fr/sdks/typescript.mdx
@@ -230,7 +230,7 @@ class SimStudioError extends Error {
import { SimStudioClient } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
- apiKey: process.env.SIMSTUDIO_API_KEY!
+ apiKey: process.env.SIM_API_KEY!
});
async function runWorkflow() {
@@ -271,7 +271,7 @@ Gérez différents types d'erreurs qui peuvent survenir pendant l'exécution du
import { SimStudioClient, SimStudioError } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
- apiKey: process.env.SIMSTUDIO_API_KEY!
+ apiKey: process.env.SIM_API_KEY!
});
async function executeWithErrorHandling() {
@@ -315,14 +315,14 @@ Configurez le client en utilisant des variables d'environnement :
import { SimStudioClient } from 'simstudio-ts-sdk';
// Development configuration
- const apiKey = process.env.SIMSTUDIO_API_KEY;
+ const apiKey = process.env.SIM_API_KEY;
if (!apiKey) {
- throw new Error('SIMSTUDIO_API_KEY environment variable is required');
+ throw new Error('SIM_API_KEY environment variable is required');
}
const client = new SimStudioClient({
apiKey,
- baseUrl: process.env.SIMSTUDIO_BASE_URL // optional
+ baseUrl: process.env.SIM_BASE_URL // optional
});
```
@@ -333,14 +333,14 @@ Configurez le client en utilisant des variables d'environnement :
import { SimStudioClient } from 'simstudio-ts-sdk';
// Production configuration with validation
- const apiKey = process.env.SIMSTUDIO_API_KEY;
+ const apiKey = process.env.SIM_API_KEY;
if (!apiKey) {
- throw new Error('SIMSTUDIO_API_KEY environment variable is required');
+ throw new Error('SIM_API_KEY environment variable is required');
}
const client = new SimStudioClient({
apiKey,
- baseUrl: process.env.SIMSTUDIO_BASE_URL || 'https://sim.ai'
+ baseUrl: process.env.SIM_BASE_URL || 'https://sim.ai'
});
```
@@ -357,7 +357,7 @@ import { SimStudioClient } from 'simstudio-ts-sdk';
const app = express();
const client = new SimStudioClient({
- apiKey: process.env.SIMSTUDIO_API_KEY!
+ apiKey: process.env.SIM_API_KEY!
});
app.use(express.json());
@@ -399,7 +399,7 @@ import { NextApiRequest, NextApiResponse } from 'next';
import { SimStudioClient } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
- apiKey: process.env.SIMSTUDIO_API_KEY!
+ apiKey: process.env.SIM_API_KEY!
});
export default async function handler(
@@ -476,7 +476,7 @@ import { useState, useCallback } from 'react';
import { SimStudioClient, WorkflowExecutionResult } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
- apiKey: process.env.NEXT_PUBLIC_SIMSTUDIO_API_KEY!
+ apiKey: process.env.NEXT_PUBLIC_SIM_API_KEY!
});
interface UseWorkflowResult {
@@ -588,7 +588,7 @@ import {
// Type-safe client initialization
const client: SimStudioClient = new SimStudioClient({
- apiKey: process.env.SIMSTUDIO_API_KEY!
+ apiKey: process.env.SIM_API_KEY!
});
// Type-safe workflow execution
diff --git a/apps/docs/content/docs/ja/sdks/python.mdx b/apps/docs/content/docs/ja/sdks/python.mdx
index 173246235..debb8bac3 100644
--- a/apps/docs/content/docs/ja/sdks/python.mdx
+++ b/apps/docs/content/docs/ja/sdks/python.mdx
@@ -214,7 +214,7 @@ class SimStudioError(Exception):
import os
from simstudio import SimStudioClient
-client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
+client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def run_workflow():
try:
@@ -252,7 +252,7 @@ run_workflow()
from simstudio import SimStudioClient, SimStudioError
import os
-client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
+client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def execute_with_error_handling():
try:
@@ -284,7 +284,7 @@ from simstudio import SimStudioClient
import os
# Using context manager to automatically close the session
-with SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY")) as client:
+with SimStudioClient(api_key=os.getenv("SIM_API_KEY")) as client:
result = client.execute_workflow("workflow-id")
print("Result:", result)
# Session is automatically closed here
@@ -298,7 +298,7 @@ with SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY")) as client:
from simstudio import SimStudioClient
import os
-client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
+client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def execute_workflows_batch(workflow_data_pairs):
"""Execute multiple workflows with different input data."""
@@ -352,8 +352,8 @@ for result in results:
# Development configuration
client = SimStudioClient(
- api_key=os.getenv("SIMSTUDIO_API_KEY"),
- base_url=os.getenv("SIMSTUDIO_BASE_URL", "https://sim.ai")
+ api_key=os.getenv("SIM_API_KEY"),
+ base_url=os.getenv("SIM_BASE_URL", "https://sim.ai")
)
```
@@ -365,13 +365,13 @@ for result in results:
from simstudio import SimStudioClient
# Production configuration with error handling
- api_key = os.getenv("SIMSTUDIO_API_KEY")
+ api_key = os.getenv("SIM_API_KEY")
if not api_key:
- raise ValueError("SIMSTUDIO_API_KEY environment variable is required")
+ raise ValueError("SIM_API_KEY environment variable is required")
client = SimStudioClient(
api_key=api_key,
- base_url=os.getenv("SIMSTUDIO_BASE_URL", "https://sim.ai")
+ base_url=os.getenv("SIM_BASE_URL", "https://sim.ai")
)
```
diff --git a/apps/docs/content/docs/ja/sdks/typescript.mdx b/apps/docs/content/docs/ja/sdks/typescript.mdx
index 1d2a6e819..ea9bed31f 100644
--- a/apps/docs/content/docs/ja/sdks/typescript.mdx
+++ b/apps/docs/content/docs/ja/sdks/typescript.mdx
@@ -230,7 +230,7 @@ class SimStudioError extends Error {
import { SimStudioClient } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
- apiKey: process.env.SIMSTUDIO_API_KEY!
+ apiKey: process.env.SIM_API_KEY!
});
async function runWorkflow() {
@@ -271,7 +271,7 @@ runWorkflow();
import { SimStudioClient, SimStudioError } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
- apiKey: process.env.SIMSTUDIO_API_KEY!
+ apiKey: process.env.SIM_API_KEY!
});
async function executeWithErrorHandling() {
@@ -315,14 +315,14 @@ async function executeWithErrorHandling() {
import { SimStudioClient } from 'simstudio-ts-sdk';
// Development configuration
- const apiKey = process.env.SIMSTUDIO_API_KEY;
+ const apiKey = process.env.SIM_API_KEY;
if (!apiKey) {
- throw new Error('SIMSTUDIO_API_KEY environment variable is required');
+ throw new Error('SIM_API_KEY environment variable is required');
}
const client = new SimStudioClient({
apiKey,
- baseUrl: process.env.SIMSTUDIO_BASE_URL // optional
+ baseUrl: process.env.SIM_BASE_URL // optional
});
```
@@ -333,14 +333,14 @@ async function executeWithErrorHandling() {
import { SimStudioClient } from 'simstudio-ts-sdk';
// Production configuration with validation
- const apiKey = process.env.SIMSTUDIO_API_KEY;
+ const apiKey = process.env.SIM_API_KEY;
if (!apiKey) {
- throw new Error('SIMSTUDIO_API_KEY environment variable is required');
+ throw new Error('SIM_API_KEY environment variable is required');
}
const client = new SimStudioClient({
apiKey,
- baseUrl: process.env.SIMSTUDIO_BASE_URL || 'https://sim.ai'
+ baseUrl: process.env.SIM_BASE_URL || 'https://sim.ai'
});
```
@@ -357,7 +357,7 @@ import { SimStudioClient } from 'simstudio-ts-sdk';
const app = express();
const client = new SimStudioClient({
- apiKey: process.env.SIMSTUDIO_API_KEY!
+ apiKey: process.env.SIM_API_KEY!
});
app.use(express.json());
@@ -399,7 +399,7 @@ import { NextApiRequest, NextApiResponse } from 'next';
import { SimStudioClient } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
- apiKey: process.env.SIMSTUDIO_API_KEY!
+ apiKey: process.env.SIM_API_KEY!
});
export default async function handler(
@@ -476,7 +476,7 @@ import { useState, useCallback } from 'react';
import { SimStudioClient, WorkflowExecutionResult } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
- apiKey: process.env.NEXT_PUBLIC_SIMSTUDIO_API_KEY!
+ apiKey: process.env.NEXT_PUBLIC_SIM_API_KEY!
});
interface UseWorkflowResult {
@@ -588,7 +588,7 @@ import {
// Type-safe client initialization
const client: SimStudioClient = new SimStudioClient({
- apiKey: process.env.SIMSTUDIO_API_KEY!
+ apiKey: process.env.SIM_API_KEY!
});
// Type-safe workflow execution
diff --git a/apps/docs/content/docs/zh/sdks/python.mdx b/apps/docs/content/docs/zh/sdks/python.mdx
index 5ed5c3ac9..b025d831a 100644
--- a/apps/docs/content/docs/zh/sdks/python.mdx
+++ b/apps/docs/content/docs/zh/sdks/python.mdx
@@ -214,7 +214,7 @@ class SimStudioError(Exception):
import os
from simstudio import SimStudioClient
-client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
+client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def run_workflow():
try:
@@ -252,7 +252,7 @@ run_workflow()
from simstudio import SimStudioClient, SimStudioError
import os
-client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
+client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def execute_with_error_handling():
try:
@@ -284,7 +284,7 @@ from simstudio import SimStudioClient
import os
# Using context manager to automatically close the session
-with SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY")) as client:
+with SimStudioClient(api_key=os.getenv("SIM_API_KEY")) as client:
result = client.execute_workflow("workflow-id")
print("Result:", result)
# Session is automatically closed here
@@ -298,7 +298,7 @@ with SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY")) as client:
from simstudio import SimStudioClient
import os
-client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
+client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def execute_workflows_batch(workflow_data_pairs):
"""Execute multiple workflows with different input data."""
@@ -352,8 +352,8 @@ for result in results:
# Development configuration
client = SimStudioClient(
- api_key=os.getenv("SIMSTUDIO_API_KEY"),
- base_url=os.getenv("SIMSTUDIO_BASE_URL", "https://sim.ai")
+ api_key=os.getenv("SIM_API_KEY"),
+ base_url=os.getenv("SIM_BASE_URL", "https://sim.ai")
)
```
@@ -365,13 +365,13 @@ for result in results:
from simstudio import SimStudioClient
# Production configuration with error handling
- api_key = os.getenv("SIMSTUDIO_API_KEY")
+ api_key = os.getenv("SIM_API_KEY")
if not api_key:
- raise ValueError("SIMSTUDIO_API_KEY environment variable is required")
+ raise ValueError("SIM_API_KEY environment variable is required")
client = SimStudioClient(
api_key=api_key,
- base_url=os.getenv("SIMSTUDIO_BASE_URL", "https://sim.ai")
+ base_url=os.getenv("SIM_BASE_URL", "https://sim.ai")
)
```
diff --git a/apps/docs/content/docs/zh/sdks/typescript.mdx b/apps/docs/content/docs/zh/sdks/typescript.mdx
index 5c48869b1..6427f0083 100644
--- a/apps/docs/content/docs/zh/sdks/typescript.mdx
+++ b/apps/docs/content/docs/zh/sdks/typescript.mdx
@@ -230,7 +230,7 @@ class SimStudioError extends Error {
import { SimStudioClient } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
- apiKey: process.env.SIMSTUDIO_API_KEY!
+ apiKey: process.env.SIM_API_KEY!
});
async function runWorkflow() {
@@ -271,7 +271,7 @@ runWorkflow();
import { SimStudioClient, SimStudioError } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
- apiKey: process.env.SIMSTUDIO_API_KEY!
+ apiKey: process.env.SIM_API_KEY!
});
async function executeWithErrorHandling() {
@@ -315,14 +315,14 @@ async function executeWithErrorHandling() {
import { SimStudioClient } from 'simstudio-ts-sdk';
// Development configuration
- const apiKey = process.env.SIMSTUDIO_API_KEY;
+ const apiKey = process.env.SIM_API_KEY;
if (!apiKey) {
- throw new Error('SIMSTUDIO_API_KEY environment variable is required');
+ throw new Error('SIM_API_KEY environment variable is required');
}
const client = new SimStudioClient({
apiKey,
- baseUrl: process.env.SIMSTUDIO_BASE_URL // optional
+ baseUrl: process.env.SIM_BASE_URL // optional
});
```
@@ -333,14 +333,14 @@ async function executeWithErrorHandling() {
import { SimStudioClient } from 'simstudio-ts-sdk';
// Production configuration with validation
- const apiKey = process.env.SIMSTUDIO_API_KEY;
+ const apiKey = process.env.SIM_API_KEY;
if (!apiKey) {
- throw new Error('SIMSTUDIO_API_KEY environment variable is required');
+ throw new Error('SIM_API_KEY environment variable is required');
}
const client = new SimStudioClient({
apiKey,
- baseUrl: process.env.SIMSTUDIO_BASE_URL || 'https://sim.ai'
+ baseUrl: process.env.SIM_BASE_URL || 'https://sim.ai'
});
```
@@ -357,7 +357,7 @@ import { SimStudioClient } from 'simstudio-ts-sdk';
const app = express();
const client = new SimStudioClient({
- apiKey: process.env.SIMSTUDIO_API_KEY!
+ apiKey: process.env.SIM_API_KEY!
});
app.use(express.json());
@@ -399,7 +399,7 @@ import { NextApiRequest, NextApiResponse } from 'next';
import { SimStudioClient } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
- apiKey: process.env.SIMSTUDIO_API_KEY!
+ apiKey: process.env.SIM_API_KEY!
});
export default async function handler(
@@ -476,7 +476,7 @@ import { useState, useCallback } from 'react';
import { SimStudioClient, WorkflowExecutionResult } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
- apiKey: process.env.NEXT_PUBLIC_SIMSTUDIO_API_KEY!
+ apiKey: process.env.NEXT_PUBLIC_SIM_API_KEY!
});
interface UseWorkflowResult {
@@ -588,7 +588,7 @@ import {
// Type-safe client initialization
const client: SimStudioClient = new SimStudioClient({
- apiKey: process.env.SIMSTUDIO_API_KEY!
+ apiKey: process.env.SIM_API_KEY!
});
// Type-safe workflow execution
diff --git a/apps/docs/lib/utils.ts b/apps/docs/lib/utils.ts
index bc3e8ab5a..61be1d99b 100644
--- a/apps/docs/lib/utils.ts
+++ b/apps/docs/lib/utils.ts
@@ -9,7 +9,7 @@ export function cn(...inputs: ClassValue[]) {
}
/**
- * Get the full URL for an asset stored in Vercel Blob or local fallback
+ * Get the full URL for an asset stored in Vercel Blob
* - If CDN is configured (NEXT_PUBLIC_BLOB_BASE_URL), uses CDN URL
* - Otherwise falls back to local static assets served from root path
*/
@@ -20,12 +20,3 @@ export function getAssetUrl(filename: string) {
}
return `/${filename}`
}
-
-/**
- * Get the full URL for a video asset stored in Vercel Blob or local fallback
- * - If CDN is configured (NEXT_PUBLIC_BLOB_BASE_URL), uses CDN URL
- * - Otherwise falls back to local static assets served from root path
- */
-export function getVideoUrl(filename: string) {
- return getAssetUrl(filename)
-}
diff --git a/apps/sim/app/(landing)/components/testimonials/testimonials.tsx b/apps/sim/app/(landing)/components/testimonials/testimonials.tsx
index e3d01cc5c..35fdc34d0 100644
--- a/apps/sim/app/(landing)/components/testimonials/testimonials.tsx
+++ b/apps/sim/app/(landing)/components/testimonials/testimonials.tsx
@@ -2,7 +2,6 @@
import { useEffect, useState } from 'react'
import Image from 'next/image'
-import { getAssetUrl } from '@/lib/utils'
import { inter } from '@/app/fonts/inter'
interface Testimonial {
@@ -14,7 +13,6 @@ interface Testimonial {
profileImage: string
}
-// Import all testimonials
const allTestimonials: Testimonial[] = [
{
text: "🚨 BREAKING: This startup just dropped the fastest way to build AI agents.\n\nThis Figma-like canvas to build agents will blow your mind.\n\nHere's why this is the best tool for building AI agents:",
@@ -22,7 +20,7 @@ const allTestimonials: Testimonial[] = [
username: '@hasantoxr',
viewCount: '515k',
tweetUrl: 'https://x.com/hasantoxr/status/1912909502036525271',
- profileImage: getAssetUrl('twitter/hasan.jpg'),
+ profileImage: '/twitter/hasan.jpg',
},
{
text: "Drag-and-drop AI workflows for devs who'd rather build agents than babysit them.",
@@ -30,7 +28,7 @@ const allTestimonials: Testimonial[] = [
username: '@GithubProjects',
viewCount: '90.4k',
tweetUrl: 'https://x.com/GithubProjects/status/1906383555707490499',
- profileImage: getAssetUrl('twitter/github-projects.jpg'),
+ profileImage: '/twitter/github-projects.jpg',
},
{
text: "🚨 BREAKING: This startup just dropped the fastest way to build AI agents.\n\nThis Figma-like canvas to build agents will blow your mind.\n\nHere's why this is the best tool for building AI agents:",
@@ -38,7 +36,7 @@ const allTestimonials: Testimonial[] = [
username: '@lazukars',
viewCount: '47.4k',
tweetUrl: 'https://x.com/lazukars/status/1913136390503600575',
- profileImage: getAssetUrl('twitter/lazukars.png'),
+ profileImage: '/twitter/lazukars.png',
},
{
text: 'omfggggg this is the zapier of agent building\n\ni always believed that building agents and using ai should not be limited to technical people. i think this solves just that\n\nthe fact that this is also open source makes me so optimistic about the future of building with ai :)))\n\ncongrats @karabegemir & @typingwala !!!',
@@ -46,7 +44,7 @@ const allTestimonials: Testimonial[] = [
username: '@nizzyabi',
viewCount: '6,269',
tweetUrl: 'https://x.com/nizzyabi/status/1907864421227180368',
- profileImage: getAssetUrl('twitter/nizzy.jpg'),
+ profileImage: '/twitter/nizzy.jpg',
},
{
text: 'A very good looking agent workflow builder 🔥 and open source!',
@@ -54,7 +52,7 @@ const allTestimonials: Testimonial[] = [
username: '@xyflowdev',
viewCount: '3,246',
tweetUrl: 'https://x.com/xyflowdev/status/1909501499719438670',
- profileImage: getAssetUrl('twitter/xyflow.jpg'),
+ profileImage: '/twitter/xyflow.jpg',
},
{
text: "One of the best products I've seen in the space, and the hustle and grind I've seen from @karabegemir and @typingwala is insane. Sim is positioned to build something game-changing, and there's no better team for the job.\n\nCongrats on the launch 🚀 🎊 great things ahead!",
@@ -62,7 +60,7 @@ const allTestimonials: Testimonial[] = [
username: '@firestorm776',
viewCount: '1,256',
tweetUrl: 'https://x.com/firestorm776/status/1907896097735061598',
- profileImage: getAssetUrl('twitter/samarth.jpg'),
+ profileImage: '/twitter/samarth.jpg',
},
{
text: 'lfgg got access to @simstudioai via @zerodotemail 😎',
@@ -70,7 +68,7 @@ const allTestimonials: Testimonial[] = [
username: '@nizzyabi',
viewCount: '1,762',
tweetUrl: 'https://x.com/nizzyabi/status/1910482357821595944',
- profileImage: getAssetUrl('twitter/nizzy.jpg'),
+ profileImage: '/twitter/nizzy.jpg',
},
{
text: 'Feels like we\'re finally getting a "Photoshop moment" for AI devs—visual, intuitive, and fast enough to keep up with ideas mid-flow.',
@@ -78,7 +76,7 @@ const allTestimonials: Testimonial[] = [
username: '@syamrajk',
viewCount: '2,784',
tweetUrl: 'https://x.com/syamrajk/status/1912911980110946491',
- profileImage: getAssetUrl('twitter/syamrajk.jpg'),
+ profileImage: '/twitter/syamrajk.jpg',
},
{
text: 'The use cases are endless. Great work @simstudioai',
@@ -86,7 +84,7 @@ const allTestimonials: Testimonial[] = [
username: '@daniel_zkim',
viewCount: '103',
tweetUrl: 'https://x.com/daniel_zkim/status/1907891273664782708',
- profileImage: getAssetUrl('twitter/daniel.jpg'),
+ profileImage: '/twitter/daniel.jpg',
},
]
@@ -95,11 +93,9 @@ export default function Testimonials() {
const [isTransitioning, setIsTransitioning] = useState(false)
const [isPaused, setIsPaused] = useState(false)
- // Create an extended array for smooth infinite scrolling
const extendedTestimonials = [...allTestimonials, ...allTestimonials]
useEffect(() => {
- // Set up automatic sliding every 3 seconds
const interval = setInterval(() => {
if (!isPaused) {
setIsTransitioning(true)
@@ -110,17 +106,15 @@ export default function Testimonials() {
return () => clearInterval(interval)
}, [isPaused])
- // Reset position when reaching the end for infinite loop
useEffect(() => {
if (currentIndex >= allTestimonials.length) {
setTimeout(() => {
setIsTransitioning(false)
setCurrentIndex(0)
- }, 500) // Match transition duration
+ }, 500)
}
}, [currentIndex])
- // Calculate the transform value
const getTransformValue = () => {
// Each card unit (card + separator) takes exactly 25% width
return `translateX(-${currentIndex * 25}%)`
diff --git a/apps/sim/app/api/chat/[identifier]/route.test.ts b/apps/sim/app/api/chat/[identifier]/route.test.ts
index 47c0bff36..c95102cf8 100644
--- a/apps/sim/app/api/chat/[identifier]/route.test.ts
+++ b/apps/sim/app/api/chat/[identifier]/route.test.ts
@@ -27,7 +27,7 @@ describe('Chat Identifier API Route', () => {
const mockAddCorsHeaders = vi.fn().mockImplementation((response) => response)
const mockValidateChatAuth = vi.fn().mockResolvedValue({ authorized: true })
const mockSetChatAuthCookie = vi.fn()
- const mockExecuteWorkflowForChat = vi.fn().mockResolvedValue(createMockStream())
+ const mockCreateStreamingResponse = vi.fn().mockResolvedValue(createMockStream())
const mockChatResult = [
{
@@ -72,7 +72,16 @@ describe('Chat Identifier API Route', () => {
validateChatAuth: mockValidateChatAuth,
setChatAuthCookie: mockSetChatAuthCookie,
validateAuthToken: vi.fn().mockReturnValue(true),
- executeWorkflowForChat: mockExecuteWorkflowForChat,
+ }))
+
+ vi.doMock('@/lib/workflows/streaming', () => ({
+ createStreamingResponse: mockCreateStreamingResponse,
+ SSE_HEADERS: {
+ 'Content-Type': 'text/event-stream',
+ 'Cache-Control': 'no-cache',
+ Connection: 'keep-alive',
+ 'X-Accel-Buffering': 'no',
+ },
}))
vi.doMock('@/lib/logs/console/logger', () => ({
@@ -369,8 +378,23 @@ describe('Chat Identifier API Route', () => {
expect(response.headers.get('Cache-Control')).toBe('no-cache')
expect(response.headers.get('Connection')).toBe('keep-alive')
- // Verify executeWorkflowForChat was called with correct parameters
- expect(mockExecuteWorkflowForChat).toHaveBeenCalledWith('chat-id', 'Hello world', 'conv-123')
+ // Verify createStreamingResponse was called with correct workflow info
+ expect(mockCreateStreamingResponse).toHaveBeenCalledWith(
+ expect.objectContaining({
+ workflow: expect.objectContaining({
+ id: 'workflow-id',
+ userId: 'user-id',
+ }),
+ input: expect.objectContaining({
+ input: 'Hello world',
+ conversationId: 'conv-123',
+ }),
+ streamConfig: expect.objectContaining({
+ isSecureMode: true,
+ workflowTriggerType: 'chat',
+ }),
+ })
+ )
})
it('should handle streaming response body correctly', async () => {
@@ -399,8 +423,8 @@ describe('Chat Identifier API Route', () => {
})
it('should handle workflow execution errors gracefully', async () => {
- const originalExecuteWorkflow = mockExecuteWorkflowForChat.getMockImplementation()
- mockExecuteWorkflowForChat.mockImplementationOnce(async () => {
+ const originalStreamingResponse = mockCreateStreamingResponse.getMockImplementation()
+ mockCreateStreamingResponse.mockImplementationOnce(async () => {
throw new Error('Execution failed')
})
@@ -417,8 +441,8 @@ describe('Chat Identifier API Route', () => {
expect(data).toHaveProperty('error')
expect(data).toHaveProperty('message', 'Execution failed')
- if (originalExecuteWorkflow) {
- mockExecuteWorkflowForChat.mockImplementation(originalExecuteWorkflow)
+ if (originalStreamingResponse) {
+ mockCreateStreamingResponse.mockImplementation(originalStreamingResponse)
}
})
@@ -443,7 +467,7 @@ describe('Chat Identifier API Route', () => {
expect(data).toHaveProperty('message', 'Invalid request body')
})
- it('should pass conversationId to executeWorkflowForChat when provided', async () => {
+ it('should pass conversationId to streaming execution when provided', async () => {
const req = createMockRequest('POST', {
input: 'Hello world',
conversationId: 'test-conversation-123',
@@ -454,10 +478,13 @@ describe('Chat Identifier API Route', () => {
await POST(req, { params })
- expect(mockExecuteWorkflowForChat).toHaveBeenCalledWith(
- 'chat-id',
- 'Hello world',
- 'test-conversation-123'
+ expect(mockCreateStreamingResponse).toHaveBeenCalledWith(
+ expect.objectContaining({
+ input: expect.objectContaining({
+ input: 'Hello world',
+ conversationId: 'test-conversation-123',
+ }),
+ })
)
})
@@ -469,7 +496,13 @@ describe('Chat Identifier API Route', () => {
await POST(req, { params })
- expect(mockExecuteWorkflowForChat).toHaveBeenCalledWith('chat-id', 'Hello world', undefined)
+ expect(mockCreateStreamingResponse).toHaveBeenCalledWith(
+ expect.objectContaining({
+ input: expect.objectContaining({
+ input: 'Hello world',
+ }),
+ })
+ )
})
})
})
diff --git a/apps/sim/app/api/chat/[identifier]/route.ts b/apps/sim/app/api/chat/[identifier]/route.ts
index 9551e9913..e349dfe74 100644
--- a/apps/sim/app/api/chat/[identifier]/route.ts
+++ b/apps/sim/app/api/chat/[identifier]/route.ts
@@ -6,7 +6,6 @@ import { createLogger } from '@/lib/logs/console/logger'
import { generateRequestId } from '@/lib/utils'
import {
addCorsHeaders,
- executeWorkflowForChat,
setChatAuthCookie,
validateAuthToken,
validateChatAuth,
@@ -15,6 +14,9 @@ import { createErrorResponse, createSuccessResponse } from '@/app/api/workflows/
const logger = createLogger('ChatIdentifierAPI')
+export const dynamic = 'force-dynamic'
+export const runtime = 'nodejs'
+
// This endpoint handles chat interactions via the identifier
export async function POST(
request: NextRequest,
@@ -106,18 +108,37 @@ export async function POST(
}
try {
- // Execute workflow with structured input (input + conversationId for context)
- const result = await executeWorkflowForChat(deployment.id, input, conversationId)
+ // Transform outputConfigs to selectedOutputs format (blockId_attribute format)
+ const selectedOutputs: string[] = []
+ if (deployment.outputConfigs && Array.isArray(deployment.outputConfigs)) {
+ for (const config of deployment.outputConfigs) {
+ const outputId = config.path
+ ? `${config.blockId}_${config.path}`
+ : `${config.blockId}_content`
+ selectedOutputs.push(outputId)
+ }
+ }
- // The result is always a ReadableStream that we can pipe to the client
- const streamResponse = new NextResponse(result, {
- status: 200,
- headers: {
- 'Content-Type': 'text/event-stream',
- 'Cache-Control': 'no-cache',
- Connection: 'keep-alive',
- 'X-Accel-Buffering': 'no',
+ const { createStreamingResponse } = await import('@/lib/workflows/streaming')
+ const { SSE_HEADERS } = await import('@/lib/utils')
+ const { createFilteredResult } = await import('@/app/api/workflows/[id]/execute/route')
+
+ const stream = await createStreamingResponse({
+ requestId,
+ workflow: { id: deployment.workflowId, userId: deployment.userId, isDeployed: true },
+ input: { input, conversationId }, // Format for chat_trigger
+ executingUserId: deployment.userId, // Use workflow owner's ID for chat deployments
+ streamConfig: {
+ selectedOutputs,
+ isSecureMode: true,
+ workflowTriggerType: 'chat',
},
+ createFilteredResult,
+ })
+
+ const streamResponse = new NextResponse(stream, {
+ status: 200,
+ headers: SSE_HEADERS,
})
return addCorsHeaders(streamResponse, request)
} catch (error: any) {
diff --git a/apps/sim/app/api/chat/utils.test.ts b/apps/sim/app/api/chat/utils.test.ts
index c1e5b68df..30ec46c62 100644
--- a/apps/sim/app/api/chat/utils.test.ts
+++ b/apps/sim/app/api/chat/utils.test.ts
@@ -416,7 +416,7 @@ describe('Chat API Utils', () => {
execution: executionResult,
}
- // Simulate the type extraction logic from executeWorkflowForChat
+ // Test that streaming execution wraps the result correctly
const extractedFromStreaming =
streamingResult && typeof streamingResult === 'object' && 'execution' in streamingResult
? streamingResult.execution
diff --git a/apps/sim/app/api/chat/utils.ts b/apps/sim/app/api/chat/utils.ts
index 840639536..d3c33b474 100644
--- a/apps/sim/app/api/chat/utils.ts
+++ b/apps/sim/app/api/chat/utils.ts
@@ -2,28 +2,10 @@ import { db } from '@sim/db'
import { chat, workflow } from '@sim/db/schema'
import { eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
-import { v4 as uuidv4 } from 'uuid'
-import { checkServerSideUsageLimits } from '@/lib/billing'
import { isDev } from '@/lib/environment'
-import { getPersonalAndWorkspaceEnv } from '@/lib/environment/utils'
import { createLogger } from '@/lib/logs/console/logger'
-import { LoggingSession } from '@/lib/logs/execution/logging-session'
-import { buildTraceSpans } from '@/lib/logs/execution/trace-spans/trace-spans'
import { hasAdminPermission } from '@/lib/permissions/utils'
-import { processStreamingBlockLogs } from '@/lib/tokenization'
-import { decryptSecret, generateRequestId } from '@/lib/utils'
-import { TriggerUtils } from '@/lib/workflows/triggers'
-import { CHAT_ERROR_MESSAGES } from '@/app/chat/constants'
-import { getBlock } from '@/blocks'
-import { Executor } from '@/executor'
-import type { BlockLog, ExecutionResult, StreamingExecution } from '@/executor/types'
-import { Serializer } from '@/serializer'
-import { mergeSubblockState } from '@/stores/workflows/server-utils'
-import type { WorkflowState } from '@/stores/workflows/workflow/types'
-
-declare global {
- var __chatStreamProcessingTasks: Promise<{ success: boolean; error?: any }>[] | undefined
-}
+import { decryptSecret } from '@/lib/utils'
const logger = createLogger('ChatAuthUtils')
@@ -281,570 +263,3 @@ export async function validateChatAuth(
// Unknown auth type
return { authorized: false, error: 'Unsupported authentication type' }
}
-
-/**
- * Executes a workflow for a chat request and returns the formatted output.
- *
- * When workflows reference , they receive the input directly.
- * The conversationId is available at for maintaining chat context.
- *
- * @param chatId - Chat deployment identifier
- * @param input - User's chat input
- * @param conversationId - Optional ID for maintaining conversation context
- * @returns Workflow execution result formatted for the chat interface
- */
-export async function executeWorkflowForChat(
- chatId: string,
- input: string,
- conversationId?: string
-): Promise<ReadableStream> {
- const requestId = generateRequestId()
-
- logger.debug(
- `[${requestId}] Executing workflow for chat: ${chatId}${
- conversationId ? `, conversationId: ${conversationId}` : ''
- }`
- )
-
- // Find the chat deployment
- const deploymentResult = await db
- .select({
- id: chat.id,
- workflowId: chat.workflowId,
- userId: chat.userId,
- outputConfigs: chat.outputConfigs,
- customizations: chat.customizations,
- })
- .from(chat)
- .where(eq(chat.id, chatId))
- .limit(1)
-
- if (deploymentResult.length === 0) {
- logger.warn(`[${requestId}] Chat not found: ${chatId}`)
- throw new Error('Chat not found')
- }
-
- const deployment = deploymentResult[0]
- const workflowId = deployment.workflowId
- const executionId = uuidv4()
-
- const usageCheck = await checkServerSideUsageLimits(deployment.userId)
- if (usageCheck.isExceeded) {
- logger.warn(
- `[${requestId}] User ${deployment.userId} has exceeded usage limits. Skipping chat execution.`,
- {
- currentUsage: usageCheck.currentUsage,
- limit: usageCheck.limit,
- workflowId: deployment.workflowId,
- chatId,
- }
- )
- throw new Error(usageCheck.message || CHAT_ERROR_MESSAGES.USAGE_LIMIT_EXCEEDED)
- }
-
- // Set up logging for chat execution
- const loggingSession = new LoggingSession(workflowId, executionId, 'chat', requestId)
-
- // Check for multi-output configuration in customizations
- const customizations = (deployment.customizations || {}) as Record<string, any>
- let outputBlockIds: string[] = []
-
- // Extract output configs from the new schema format
- let selectedOutputIds: string[] = []
- if (deployment.outputConfigs && Array.isArray(deployment.outputConfigs)) {
- // Extract output IDs in the format expected by the streaming processor
- logger.debug(
- `[${requestId}] Found ${deployment.outputConfigs.length} output configs in deployment`
- )
-
- selectedOutputIds = deployment.outputConfigs.map((config) => {
- const outputId = config.path
- ? `${config.blockId}_${config.path}`
- : `${config.blockId}.content`
-
- logger.debug(
- `[${requestId}] Processing output config: blockId=${config.blockId}, path=${config.path || 'content'} -> outputId=${outputId}`
- )
-
- return outputId
- })
-
- // Also extract block IDs for legacy compatibility
- outputBlockIds = deployment.outputConfigs.map((config) => config.blockId)
- } else {
- // Use customizations as fallback
- outputBlockIds = Array.isArray(customizations.outputBlockIds)
- ? customizations.outputBlockIds
- : []
- }
-
- // Fall back to customizations if we still have no outputs
- if (
- outputBlockIds.length === 0 &&
- customizations.outputBlockIds &&
- customizations.outputBlockIds.length > 0
- ) {
- outputBlockIds = customizations.outputBlockIds
- }
-
- logger.debug(
- `[${requestId}] Using ${outputBlockIds.length} output blocks and ${selectedOutputIds.length} selected output IDs for extraction`
- )
-
- // Find the workflow to check if it's deployed
- const workflowResult = await db
- .select({
- isDeployed: workflow.isDeployed,
- variables: workflow.variables,
- workspaceId: workflow.workspaceId,
- })
- .from(workflow)
- .where(eq(workflow.id, workflowId))
- .limit(1)
-
- if (workflowResult.length === 0 || !workflowResult[0].isDeployed) {
- logger.warn(`[${requestId}] Workflow not found or not deployed: ${workflowId}`)
- throw new Error('Workflow not available')
- }
-
- // Load the active deployed state from the deployment versions table
- const { loadDeployedWorkflowState } = await import('@/lib/workflows/db-helpers')
-
- let deployedState: WorkflowState
- try {
- deployedState = await loadDeployedWorkflowState(workflowId)
- } catch (error) {
- logger.error(`[${requestId}] Failed to load deployed state for workflow ${workflowId}:`, error)
- throw new Error(`Workflow must be deployed to be available for chat`)
- }
-
- const { blocks, edges, loops, parallels } = deployedState
-
- // Prepare for execution, similar to use-workflow-execution.ts
- const mergedStates = mergeSubblockState(blocks)
-
- const filteredStates = Object.entries(mergedStates).reduce(
- (acc, [id, block]) => {
- const blockConfig = getBlock(block.type)
- const isTriggerBlock = blockConfig?.category === 'triggers'
- const isChatTrigger = block.type === 'chat_trigger'
-
- // Keep all non-trigger blocks and also keep the chat_trigger block
- if (!isTriggerBlock || isChatTrigger) {
- acc[id] = block
- }
- return acc
- },
- {} as typeof mergedStates
- )
-
- const currentBlockStates = Object.entries(filteredStates).reduce(
- (acc, [id, block]) => {
- acc[id] = Object.entries(block.subBlocks).reduce(
- (subAcc, [key, subBlock]) => {
- subAcc[key] = subBlock.value
- return subAcc
- },
- {} as Record<string, any>
- )
- return acc
- },
- {} as Record<string, Record<string, any>>
- )
-
- // Get user environment variables with workspace precedence
- let envVars: Record<string, string> = {}
- try {
- const workspaceId = workflowResult[0].workspaceId || undefined
- const { personalEncrypted, workspaceEncrypted } = await getPersonalAndWorkspaceEnv(
- deployment.userId,
- workspaceId
- )
- envVars = { ...personalEncrypted, ...workspaceEncrypted }
- } catch (error) {
- logger.warn(`[${requestId}] Could not fetch environment variables:`, error)
- }
-
- let workflowVariables = {}
- try {
- if (workflowResult[0].variables) {
- workflowVariables =
- typeof workflowResult[0].variables === 'string'
- ? JSON.parse(workflowResult[0].variables)
- : workflowResult[0].variables
- }
- } catch (error) {
- logger.warn(`[${requestId}] Could not parse workflow variables:`, error)
- }
-
- // Filter edges to exclude connections to/from trigger blocks (same as manual execution)
- const triggerBlockIds = Object.keys(mergedStates).filter((id) => {
- const type = mergedStates[id].type
- const blockConfig = getBlock(type)
- // Exclude chat_trigger from the list so its edges are preserved
- return blockConfig?.category === 'triggers' && type !== 'chat_trigger'
- })
-
- const filteredEdges = edges.filter(
- (edge) => !triggerBlockIds.includes(edge.source) && !triggerBlockIds.includes(edge.target)
- )
-
- // Create serialized workflow with filtered blocks and edges
- const serializedWorkflow = new Serializer().serializeWorkflow(
- filteredStates,
- filteredEdges,
- loops,
- parallels,
- true // Enable validation during execution
- )
-
- // Decrypt environment variables
- const decryptedEnvVars: Record<string, string> = {}
- for (const [key, encryptedValue] of Object.entries(envVars)) {
- try {
- const { decrypted } = await decryptSecret(encryptedValue)
- decryptedEnvVars[key] = decrypted
- } catch (error: any) {
- logger.error(`[${requestId}] Failed to decrypt environment variable "${key}"`, error)
- // Log but continue - we don't want to break execution if just one var fails
- }
- }
-
- // Process block states to ensure response formats are properly parsed
- const processedBlockStates = Object.entries(currentBlockStates).reduce(
- (acc, [blockId, blockState]) => {
- // Check if this block has a responseFormat that needs to be parsed
- if (blockState.responseFormat && typeof blockState.responseFormat === 'string') {
- try {
- logger.debug(`[${requestId}] Parsing responseFormat for block ${blockId}`)
- // Attempt to parse the responseFormat if it's a string
- const parsedResponseFormat = JSON.parse(blockState.responseFormat)
-
- acc[blockId] = {
- ...blockState,
- responseFormat: parsedResponseFormat,
- }
- } catch (error) {
- logger.warn(`[${requestId}] Failed to parse responseFormat for block ${blockId}`, error)
- acc[blockId] = blockState
- }
- } else {
- acc[blockId] = blockState
- }
- return acc
- },
- {} as Record<string, Record<string, any>>
- )
-
- // Start logging session
- await loggingSession.safeStart({
- userId: deployment.userId,
- workspaceId: workflowResult[0].workspaceId || '',
- variables: workflowVariables,
- })
-
- let sessionCompleted = false
-
- const stream = new ReadableStream({
- async start(controller) {
- const encoder = new TextEncoder()
- let executionResultForLogging: ExecutionResult | null = null
-
- try {
- const streamedContent = new Map()
- const streamedBlocks = new Set() // Track which blocks have started streaming
-
- const onStream = async (streamingExecution: any): Promise<void> => {
- if (!streamingExecution.stream) return
-
- const blockId = streamingExecution.execution?.blockId
- const reader = streamingExecution.stream.getReader()
- if (blockId) {
- streamedContent.set(blockId, '')
-
- // Add separator if this is not the first block to stream
- if (streamedBlocks.size > 0) {
- // Send separator before the new block starts
- controller.enqueue(
- encoder.encode(`data: ${JSON.stringify({ blockId, chunk: '\n\n' })}\n\n`)
- )
- }
- streamedBlocks.add(blockId)
- }
- try {
- while (true) {
- const { done, value } = await reader.read()
- if (done) {
- controller.enqueue(
- encoder.encode(`data: ${JSON.stringify({ blockId, event: 'end' })}\n\n`)
- )
- break
- }
- const chunk = new TextDecoder().decode(value)
- if (blockId) {
- streamedContent.set(blockId, (streamedContent.get(blockId) || '') + chunk)
- }
- controller.enqueue(encoder.encode(`data: ${JSON.stringify({ blockId, chunk })}\n\n`))
- }
- } catch (error) {
- logger.error('Error while reading from stream:', error)
- controller.error(error)
- }
- }
-
- // Determine the start block for chat execution BEFORE creating executor
- const startBlock = TriggerUtils.findStartBlock(mergedStates, 'chat')
-
- if (!startBlock) {
- const errorMessage = CHAT_ERROR_MESSAGES.NO_CHAT_TRIGGER
- logger.error(`[${requestId}] ${errorMessage}`)
-
- if (!sessionCompleted) {
- await loggingSession.safeCompleteWithError({
- endedAt: new Date().toISOString(),
- totalDurationMs: 0,
- error: { message: errorMessage },
- traceSpans: [],
- })
- sessionCompleted = true
- }
-
- // Send error event that the client expects
- controller.enqueue(
- encoder.encode(
- `data: ${JSON.stringify({
- event: 'error',
- error: CHAT_ERROR_MESSAGES.GENERIC_ERROR,
- })}\n\n`
- )
- )
- controller.close()
- return
- }
-
- const startBlockId = startBlock.blockId
-
- // Create executor AFTER confirming we have a chat trigger
- const executor = new Executor({
- workflow: serializedWorkflow,
- currentBlockStates: processedBlockStates,
- envVarValues: decryptedEnvVars,
- workflowInput: { input: input, conversationId },
- workflowVariables,
- contextExtensions: {
- stream: true,
- selectedOutputIds: selectedOutputIds.length > 0 ? selectedOutputIds : outputBlockIds,
- edges: filteredEdges.map((e: any) => ({
- source: e.source,
- target: e.target,
- })),
- onStream,
- isDeployedContext: true,
- },
- })
-
- // Set up logging on the executor
- loggingSession.setupExecutor(executor)
-
- let result: ExecutionResult | StreamingExecution | undefined
- try {
- result = await executor.execute(workflowId, startBlockId)
- } catch (error: any) {
- logger.error(`[${requestId}] Chat workflow execution failed:`, error)
- if (!sessionCompleted) {
- const executionResult = error?.executionResult || {
- success: false,
- output: {},
- logs: [],
- }
- const { traceSpans } = buildTraceSpans(executionResult)
-
- await loggingSession.safeCompleteWithError({
- endedAt: new Date().toISOString(),
- totalDurationMs: 0,
- error: { message: error.message || 'Chat workflow execution failed' },
- traceSpans,
- })
- sessionCompleted = true
- }
-
- // Send error to stream before ending
- controller.enqueue(
- encoder.encode(
- `data: ${JSON.stringify({
- event: 'error',
- error: error.message || 'Chat workflow execution failed',
- })}\n\n`
- )
- )
- controller.close()
- return // Don't throw - just return to end the stream gracefully
- }
-
- // Handle both ExecutionResult and StreamingExecution types
- const executionResult =
- result && typeof result === 'object' && 'execution' in result
- ? (result.execution as ExecutionResult)
- : (result as ExecutionResult)
-
- executionResultForLogging = executionResult
-
- if (executionResult?.logs) {
- const processedOutputs = new Set()
- executionResult.logs.forEach((log: BlockLog) => {
- if (streamedContent.has(log.blockId)) {
- const content = streamedContent.get(log.blockId)
- if (log.output && content) {
- const separator = processedOutputs.size > 0 ? '\n\n' : ''
- log.output.content = separator + content
- processedOutputs.add(log.blockId)
- }
- }
- })
-
- const nonStreamingLogs = executionResult.logs.filter(
- (log: BlockLog) => !streamedContent.has(log.blockId)
- )
-
- const extractBlockIdFromOutputId = (outputId: string): string => {
- return outputId.includes('_') ? outputId.split('_')[0] : outputId.split('.')[0]
- }
-
- const extractPathFromOutputId = (outputId: string, blockId: string): string => {
- return outputId.substring(blockId.length + 1)
- }
-
- const parseOutputContentSafely = (output: any): any => {
- if (!output?.content) {
- return output
- }
-
- if (typeof output.content === 'string') {
- try {
- return JSON.parse(output.content)
- } catch (e) {
- return output
- }
- }
-
- return output
- }
-
- const outputsToRender = selectedOutputIds.filter((outputId) => {
- const blockIdForOutput = extractBlockIdFromOutputId(outputId)
- return nonStreamingLogs.some((log) => log.blockId === blockIdForOutput)
- })
-
- for (const outputId of outputsToRender) {
- const blockIdForOutput = extractBlockIdFromOutputId(outputId)
- const path = extractPathFromOutputId(outputId, blockIdForOutput)
- const log = nonStreamingLogs.find((l) => l.blockId === blockIdForOutput)
-
- if (log) {
- let outputValue: any = log.output
-
- if (path) {
- outputValue = parseOutputContentSafely(outputValue)
-
- const pathParts = path.split('.')
- for (const part of pathParts) {
- if (outputValue && typeof outputValue === 'object' && part in outputValue) {
- outputValue = outputValue[part]
- } else {
- outputValue = undefined
- break
- }
- }
- }
-
- if (outputValue !== undefined) {
- const separator = processedOutputs.size > 0 ? '\n\n' : ''
-
- const formattedOutput =
- typeof outputValue === 'string'
- ? outputValue
- : JSON.stringify(outputValue, null, 2)
-
- if (!log.output.content) {
- log.output.content = separator + formattedOutput
- } else {
- log.output.content = separator + formattedOutput
- }
- processedOutputs.add(log.blockId)
- }
- }
- }
-
- const processedCount = processStreamingBlockLogs(executionResult.logs, streamedContent)
- logger.info(`Processed ${processedCount} blocks for streaming tokenization`)
-
- const { traceSpans, totalDuration } = buildTraceSpans(executionResult)
- const enrichedResult = { ...executionResult, traceSpans, totalDuration }
- if (conversationId) {
- if (!enrichedResult.metadata) {
- enrichedResult.metadata = {
- duration: totalDuration,
- startTime: new Date().toISOString(),
- }
- }
- ;(enrichedResult.metadata as any).conversationId = conversationId
- }
- }
-
- if (!(result && typeof result === 'object' && 'stream' in result)) {
- controller.enqueue(
- encoder.encode(`data: ${JSON.stringify({ event: 'final', data: result })}\n\n`)
- )
- }
-
- if (!sessionCompleted) {
- const resultForTracing =
- executionResult || ({ success: true, output: {}, logs: [] } as ExecutionResult)
- const { traceSpans } = buildTraceSpans(resultForTracing)
- await loggingSession.safeComplete({
- endedAt: new Date().toISOString(),
- totalDurationMs: executionResult?.metadata?.duration || 0,
- finalOutput: executionResult?.output || {},
- traceSpans,
- })
- sessionCompleted = true
- }
-
- controller.close()
- } catch (error: any) {
- logger.error(`[${requestId}] Chat execution streaming error:`, error)
-
- if (!sessionCompleted && loggingSession) {
- const executionResult = executionResultForLogging ||
- (error?.executionResult as ExecutionResult | undefined) || {
- success: false,
- output: {},
- logs: [],
- }
- const { traceSpans } = buildTraceSpans(executionResult)
-
- await loggingSession.safeCompleteWithError({
- endedAt: new Date().toISOString(),
- totalDurationMs: 0,
- error: { message: error.message || 'Stream processing error' },
- traceSpans,
- })
- sessionCompleted = true
- }
-
- controller.enqueue(
- encoder.encode(
- `data: ${JSON.stringify({
- event: 'error',
- error: error.message || 'Stream processing error',
- })}\n\n`
- )
- )
-
- controller.close()
- }
- },
- })
-
- return stream
-}
diff --git a/apps/sim/app/api/workflows/[id]/execute/route.ts b/apps/sim/app/api/workflows/[id]/execute/route.ts
index ea4bd5a3f..4d12560d1 100644
--- a/apps/sim/app/api/workflows/[id]/execute/route.ts
+++ b/apps/sim/app/api/workflows/[id]/execute/route.ts
@@ -9,6 +9,7 @@ import { authenticateApiKeyFromHeader, updateApiKeyLastUsed } from '@/lib/api-ke
import { getSession } from '@/lib/auth'
import { checkServerSideUsageLimits } from '@/lib/billing'
import { getHighestPrioritySubscription } from '@/lib/billing/core/subscription'
+import { env } from '@/lib/env'
import { getPersonalAndWorkspaceEnv } from '@/lib/environment/utils'
import { createLogger } from '@/lib/logs/console/logger'
import { LoggingSession } from '@/lib/logs/execution/logging-session'
@@ -34,15 +35,11 @@ const logger = createLogger('WorkflowExecuteAPI')
export const dynamic = 'force-dynamic'
export const runtime = 'nodejs'
-// Define the schema for environment variables
const EnvVarsSchema = z.record(z.string())
-// Keep track of running executions to prevent duplicate requests
-// Use a combination of workflow ID and request ID to allow concurrent executions with different inputs
const runningExecutions = new Set()
-// Utility function to filter out logs and workflowConnections from API response
-function createFilteredResult(result: any) {
+export function createFilteredResult(result: any) {
return {
...result,
logs: undefined,
@@ -55,7 +52,6 @@ function createFilteredResult(result: any) {
}
}
-// Custom error class for usage limit exceeded
class UsageLimitError extends Error {
statusCode: number
constructor(message: string, statusCode = 402) {
@@ -64,20 +60,76 @@ class UsageLimitError extends Error {
}
}
-async function executeWorkflow(
+/**
+ * Resolves output IDs to the internal blockId_attribute format
+ * Supports both:
+ * - User-facing format: blockName.path (e.g., "agent1.content")
+ * - Internal format: blockId_attribute (e.g., "uuid_content") - used by chat deployments
+ */
+function resolveOutputIds(
+ selectedOutputs: string[] | undefined,
+  blocks: Record<string, any>
+): string[] | undefined {
+ if (!selectedOutputs || selectedOutputs.length === 0) {
+ return selectedOutputs
+ }
+
+ // UUID regex to detect if it's already in blockId_attribute format
+ const UUID_REGEX = /^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/i
+
+ return selectedOutputs.map((outputId) => {
+ // If it starts with a UUID, it's already in blockId_attribute format (from chat deployments)
+ if (UUID_REGEX.test(outputId)) {
+ return outputId
+ }
+
+ // Otherwise, it's in blockName.path format from the user/API
+ const dotIndex = outputId.indexOf('.')
+ if (dotIndex === -1) {
+ logger.warn(`Invalid output ID format (missing dot): ${outputId}`)
+ return outputId
+ }
+
+ const blockName = outputId.substring(0, dotIndex)
+ const path = outputId.substring(dotIndex + 1)
+
+ // Find the block by name (case-insensitive, ignoring spaces)
+ const normalizedBlockName = blockName.toLowerCase().replace(/\s+/g, '')
+ const block = Object.values(blocks).find((b: any) => {
+ const normalized = (b.name || '').toLowerCase().replace(/\s+/g, '')
+ return normalized === normalizedBlockName
+ })
+
+ if (!block) {
+ logger.warn(`Block not found for name: ${blockName} (from output ID: ${outputId})`)
+ return outputId
+ }
+
+ const resolvedId = `${block.id}_${path}`
+ logger.debug(`Resolved output ID: ${outputId} -> ${resolvedId}`)
+ return resolvedId
+ })
+}
+
+export async function executeWorkflow(
workflow: any,
requestId: string,
input: any | undefined,
- actorUserId: string
+ actorUserId: string,
+ streamConfig?: {
+ enabled: boolean
+ selectedOutputs?: string[]
+ isSecureMode?: boolean // When true, filter out all sensitive data
+ workflowTriggerType?: 'api' | 'chat' // Which trigger block type to look for (default: 'api')
+    onStream?: (streamingExec: any) => Promise<void> // Callback for streaming agent responses
+    onBlockComplete?: (blockId: string, output: any) => Promise<void> // Callback when any block completes
+ }
): Promise<ExecutionResult> {
const workflowId = workflow.id
const executionId = uuidv4()
- // Create a unique execution key combining workflow ID and request ID
- // This allows concurrent executions of the same workflow with different inputs
const executionKey = `${workflowId}:${requestId}`
- // Skip if this exact execution is already running (prevents duplicate requests)
if (runningExecutions.has(executionKey)) {
logger.warn(`[${requestId}] Execution is already running: ${executionKey}`)
throw new Error('Execution is already running')
@@ -275,15 +327,20 @@ async function executeWorkflow(
true // Enable validation during execution
)
- // Determine API trigger start block
- // Direct API execution ONLY works with API trigger blocks (or legacy starter in api/run mode)
- const startBlock = TriggerUtils.findStartBlock(mergedStates, 'api', false) // isChildWorkflow = false
+ // Determine trigger start block based on execution type
+ // - 'chat': For chat deployments (looks for chat_trigger block)
+ // - 'api': For direct API execution (looks for api_trigger block)
+ // streamConfig is passed from POST handler when using streaming/chat
+ const preferredTriggerType = streamConfig?.workflowTriggerType || 'api'
+ const startBlock = TriggerUtils.findStartBlock(mergedStates, preferredTriggerType, false)
if (!startBlock) {
- logger.error(`[${requestId}] No API trigger configured for this workflow`)
- throw new Error(
- 'No API trigger configured for this workflow. Add an API Trigger block or use a Start block in API mode.'
- )
+ const errorMsg =
+ preferredTriggerType === 'api'
+ ? 'No API trigger block found. Add an API Trigger block to this workflow.'
+ : 'No chat trigger block found. Add a Chat Trigger block to this workflow.'
+ logger.error(`[${requestId}] ${errorMsg}`)
+ throw new Error(errorMsg)
}
const startBlockId = startBlock.blockId
@@ -301,38 +358,50 @@ async function executeWorkflow(
}
}
+ // Build context extensions
+ const contextExtensions: any = {
+ executionId,
+ workspaceId: workflow.workspaceId,
+ isDeployedContext: true,
+ }
+
+ // Add streaming configuration if enabled
+ if (streamConfig?.enabled) {
+ contextExtensions.stream = true
+ contextExtensions.selectedOutputs = streamConfig.selectedOutputs || []
+ contextExtensions.edges = edges.map((e: any) => ({
+ source: e.source,
+ target: e.target,
+ }))
+ contextExtensions.onStream = streamConfig.onStream
+ contextExtensions.onBlockComplete = streamConfig.onBlockComplete
+ }
+
const executor = new Executor({
workflow: serializedWorkflow,
currentBlockStates: processedBlockStates,
envVarValues: decryptedEnvVars,
workflowInput: processedInput,
workflowVariables,
- contextExtensions: {
- executionId,
- workspaceId: workflow.workspaceId,
- isDeployedContext: true,
- },
+ contextExtensions,
})
// Set up logging on the executor
loggingSession.setupExecutor(executor)
- const result = await executor.execute(workflowId, startBlockId)
-
- // Check if we got a StreamingExecution result (with stream + execution properties)
- // For API routes, we only care about the ExecutionResult part, not the stream
- const executionResult = 'stream' in result && 'execution' in result ? result.execution : result
+ // Execute workflow (will always return ExecutionResult since we don't use onStream)
+ const result = (await executor.execute(workflowId, startBlockId)) as ExecutionResult
logger.info(`[${requestId}] Workflow execution completed: ${workflowId}`, {
- success: executionResult.success,
- executionTime: executionResult.metadata?.duration,
+ success: result.success,
+ executionTime: result.metadata?.duration,
})
// Build trace spans from execution result (works for both success and failure)
- const { traceSpans, totalDuration } = buildTraceSpans(executionResult)
+ const { traceSpans, totalDuration } = buildTraceSpans(result)
// Update workflow run counts if execution was successful
- if (executionResult.success) {
+ if (result.success) {
await updateWorkflowRunCounts(workflowId)
// Track API call in user stats
@@ -348,11 +417,12 @@ async function executeWorkflow(
await loggingSession.safeComplete({
endedAt: new Date().toISOString(),
totalDurationMs: totalDuration || 0,
- finalOutput: executionResult.output || {},
+ finalOutput: result.output || {},
traceSpans: (traceSpans || []) as any,
})
- return executionResult
+ // For non-streaming, return the execution result
+ return result
} catch (error: any) {
logger.error(`[${requestId}] Workflow execution failed: ${workflowId}`, error)
@@ -507,45 +577,76 @@ export async function POST(
const executionMode = request.headers.get('X-Execution-Mode')
const isAsync = executionMode === 'async'
- // Parse request body
+ // Parse request body first to check for internal parameters
const body = await request.text()
logger.info(`[${requestId}] ${body ? 'Request body provided' : 'No request body provided'}`)
- let input = {}
+ let parsedBody: any = {}
if (body) {
try {
- input = JSON.parse(body)
+ parsedBody = JSON.parse(body)
} catch (error) {
logger.error(`[${requestId}] Failed to parse request body as JSON`, error)
return createErrorResponse('Invalid JSON in request body', 400)
}
}
- logger.info(`[${requestId}] Input passed to workflow:`, input)
+ logger.info(`[${requestId}] Input passed to workflow:`, parsedBody)
- // Get authenticated user and determine trigger type
- let authenticatedUserId: string | null = null
- let triggerType: TriggerType = 'manual'
+ const extractExecutionParams = (req: NextRequest, body: any) => {
+ const internalSecret = req.headers.get('X-Internal-Secret')
+ const isInternalCall = internalSecret === env.INTERNAL_API_SECRET
- const session = await getSession()
- const apiKeyHeader = request.headers.get('X-API-Key')
- if (session?.user?.id && !apiKeyHeader) {
- authenticatedUserId = session.user.id
- triggerType = 'manual'
- } else if (apiKeyHeader) {
- const auth = await authenticateApiKeyFromHeader(apiKeyHeader)
- if (!auth.success || !auth.userId) {
- return createErrorResponse('Unauthorized', 401)
- }
- authenticatedUserId = auth.userId
- triggerType = 'api'
- if (auth.keyId) {
- void updateApiKeyLastUsed(auth.keyId).catch(() => {})
+ return {
+ isSecureMode: body.isSecureMode !== undefined ? body.isSecureMode : isInternalCall,
+ streamResponse: req.headers.get('X-Stream-Response') === 'true' || body.stream === true,
+ selectedOutputs:
+ body.selectedOutputs ||
+ (req.headers.get('X-Selected-Outputs')
+ ? JSON.parse(req.headers.get('X-Selected-Outputs')!)
+ : undefined),
+ workflowTriggerType:
+ body.workflowTriggerType || (isInternalCall && body.stream ? 'chat' : 'api'),
+ input: body.input !== undefined ? body.input : body,
}
}
- if (!authenticatedUserId) {
- return createErrorResponse('Authentication required', 401)
+ const {
+ isSecureMode: finalIsSecureMode,
+ streamResponse,
+ selectedOutputs,
+ workflowTriggerType,
+ input,
+ } = extractExecutionParams(request as NextRequest, parsedBody)
+
+ // Get authenticated user and determine trigger type
+ let authenticatedUserId: string
+ let triggerType: TriggerType = 'manual'
+
+ // For internal calls (chat deployments), use the workflow owner's ID
+ if (finalIsSecureMode) {
+ authenticatedUserId = validation.workflow.userId
+ triggerType = 'manual' // Chat deployments use manual trigger type (no rate limit)
+ } else {
+ const session = await getSession()
+ const apiKeyHeader = request.headers.get('X-API-Key')
+
+ if (session?.user?.id && !apiKeyHeader) {
+ authenticatedUserId = session.user.id
+ triggerType = 'manual'
+ } else if (apiKeyHeader) {
+ const auth = await authenticateApiKeyFromHeader(apiKeyHeader)
+ if (!auth.success || !auth.userId) {
+ return createErrorResponse('Unauthorized', 401)
+ }
+ authenticatedUserId = auth.userId
+ triggerType = 'api'
+ if (auth.keyId) {
+ void updateApiKeyLastUsed(auth.keyId).catch(() => {})
+ }
+ } else {
+ return createErrorResponse('Authentication required', 401)
+ }
}
// Get user subscription (checks both personal and org subscriptions)
@@ -631,13 +732,47 @@ export async function POST(
)
}
+ // Handle streaming response - wrap execution in SSE stream
+ if (streamResponse) {
+ // Load workflow blocks to resolve output IDs from blockName.attribute to blockId_attribute format
+ const deployedData = await loadDeployedWorkflowState(workflowId)
+ const resolvedSelectedOutputs = selectedOutputs
+ ? resolveOutputIds(selectedOutputs, deployedData.blocks || {})
+ : selectedOutputs
+
+ // Use shared streaming response creator
+ const { createStreamingResponse } = await import('@/lib/workflows/streaming')
+ const { SSE_HEADERS } = await import('@/lib/utils')
+
+ const stream = await createStreamingResponse({
+ requestId,
+ workflow: validation.workflow,
+ input,
+ executingUserId: authenticatedUserId,
+ streamConfig: {
+ selectedOutputs: resolvedSelectedOutputs,
+ isSecureMode: finalIsSecureMode,
+ workflowTriggerType,
+ },
+ createFilteredResult,
+ })
+
+ return new NextResponse(stream, {
+ status: 200,
+ headers: SSE_HEADERS,
+ })
+ }
+
+ // Non-streaming execution
const result = await executeWorkflow(
validation.workflow,
requestId,
input,
- authenticatedUserId
+ authenticatedUserId,
+ undefined
)
+ // Non-streaming response
const hasResponseBlock = workflowHasResponseBlock(result)
if (hasResponseBlock) {
return createHttpResponseFromBlock(result)
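
The `resolveOutputIds` helper added above normalizes user-facing `blockName.path` identifiers into the internal `blockId_attribute` format before streaming starts. A self-contained sketch of the same resolution for a single ID, assuming blocks are keyed by ID with `{ id, name }` entries (the `BlockInfo` shape and `resolveOutputId` name are illustrative, not part of the change):

```ts
interface BlockInfo {
  id: string
  name?: string
}

// Mirrors the resolution logic in resolveOutputIds above, for one output ID.
function resolveOutputId(outputId: string, blocks: Record<string, BlockInfo>): string {
  const UUID_REGEX = /^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/i
  if (UUID_REGEX.test(outputId)) return outputId // already blockId_attribute

  const dotIndex = outputId.indexOf('.')
  if (dotIndex === -1) return outputId // malformed; pass through unchanged

  const blockName = outputId.slice(0, dotIndex).toLowerCase().replace(/\s+/g, '')
  const path = outputId.slice(dotIndex + 1)
  const block = Object.values(blocks).find(
    (b) => (b.name ?? '').toLowerCase().replace(/\s+/g, '') === blockName
  )
  return block ? `${block.id}_${path}` : outputId
}

// resolveOutputId('Agent 1.content', { 'uuid-1': { id: 'uuid-1', name: 'Agent 1' } })
// -> 'uuid-1_content'
```

Block names are matched case-insensitively with spaces stripped, so `"Agent 1.content"` and `"agent1.content"` resolve to the same block.
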
diff --git a/apps/sim/app/api/workflows/middleware.ts b/apps/sim/app/api/workflows/middleware.ts
index 5cab568a2..8ac40caed 100644
--- a/apps/sim/app/api/workflows/middleware.ts
+++ b/apps/sim/app/api/workflows/middleware.ts
@@ -1,6 +1,7 @@
import type { NextRequest } from 'next/server'
import { authenticateApiKey } from '@/lib/api-key/auth'
import { authenticateApiKeyFromHeader, updateApiKeyLastUsed } from '@/lib/api-key/service'
+import { env } from '@/lib/env'
import { createLogger } from '@/lib/logs/console/logger'
import { getWorkflowById } from '@/lib/workflows/utils'
@@ -37,7 +38,11 @@ export async function validateWorkflowAccess(
}
}
- // API key authentication
+ const internalSecret = request.headers.get('X-Internal-Secret')
+ if (internalSecret === env.INTERNAL_API_SECRET) {
+ return { workflow }
+ }
+
let apiKeyHeader = null
for (const [key, value] of request.headers.entries()) {
if (key.toLowerCase() === 'x-api-key' && value) {
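
With the `X-Internal-Secret` check added to `validateWorkflowAccess`, an internal caller can reach the execute route without an API key. A hedged sketch of such a call, assuming the caller holds the same `INTERNAL_API_SECRET` value the server reads from `env`; the relative URL and body fields (`input`, `stream`, `workflowTriggerType`) follow the route handler above, while the function name is illustrative:

```ts
// Hypothetical internal caller (e.g., a chat deployment) using the shared secret
// instead of an API key. The secret must equal env.INTERNAL_API_SECRET on the server.
async function executeInternally(workflowId: string, input: unknown): Promise<Response> {
  return fetch(`/api/workflows/${workflowId}/execute`, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'X-Internal-Secret': process.env.INTERNAL_API_SECRET ?? '',
    },
    // stream + workflowTriggerType route execution through the chat trigger and SSE path
    body: JSON.stringify({ input, stream: true, workflowTriggerType: 'chat' }),
  })
}
```

When `stream` is set, the returned response body is the Server-Sent Events stream produced by `createStreamingResponse`.
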
diff --git a/apps/sim/app/chat/hooks/use-chat-streaming.ts b/apps/sim/app/chat/hooks/use-chat-streaming.ts
index 01b1a32a7..3f5473e8a 100644
--- a/apps/sim/app/chat/hooks/use-chat-streaming.ts
+++ b/apps/sim/app/chat/hooks/use-chat-streaming.ts
@@ -4,8 +4,6 @@ import { useRef, useState } from 'react'
import { createLogger } from '@/lib/logs/console/logger'
import type { ChatMessage } from '@/app/chat/components/message/message'
import { CHAT_ERROR_MESSAGES } from '@/app/chat/constants'
-// No longer need complex output extraction - backend handles this
-import type { ExecutionResult } from '@/executor/types'
const logger = createLogger('UseChatStreaming')
@@ -148,11 +146,16 @@ export function useChatStreaming() {
for (const line of lines) {
if (line.startsWith('data: ')) {
+ const data = line.substring(6)
+
+ if (data === '[DONE]') {
+ continue
+ }
+
try {
- const json = JSON.parse(line.substring(6))
+ const json = JSON.parse(data)
const { blockId, chunk: contentChunk, event: eventType } = json
- // Handle error events from the server
if (eventType === 'error' || json.event === 'error') {
const errorMessage = json.error || CHAT_ERROR_MESSAGES.GENERIC_ERROR
setMessages((prev) =>
@@ -172,34 +175,11 @@ export function useChatStreaming() {
}
if (eventType === 'final' && json.data) {
- // The backend has already processed and combined all outputs
- // We just need to extract the combined content and use it
- const result = json.data as ExecutionResult
-
- // Collect all content from logs that have output.content (backend processed)
- let combinedContent = ''
- if (result.logs) {
- const contentParts: string[] = []
-
- // Get content from all logs that have processed content
- result.logs.forEach((log) => {
- if (log.output?.content && typeof log.output.content === 'string') {
- // The backend already includes proper separators, so just collect the content
- contentParts.push(log.output.content)
- }
- })
-
- // Join without additional separators since backend already handles this
- combinedContent = contentParts.join('')
- }
-
- // Update the existing streaming message with the final combined content
setMessages((prev) =>
prev.map((msg) =>
msg.id === messageId
? {
...msg,
- content: combinedContent || accumulatedText, // Use combined content or fallback to streamed
isStreaming: false,
}
: msg
@@ -210,7 +190,6 @@ export function useChatStreaming() {
}
if (blockId && contentChunk) {
- // Track that this block has streamed content (like chat panel)
if (!messageIdMap.has(blockId)) {
messageIdMap.set(blockId, messageId)
}
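
The hook above parses `data: ` lines, skips the `[DONE]` terminator, and routes chunks by `blockId`. A minimal standalone sketch of the same consumption pattern over a `fetch` response body, assuming an SSE response in the chunk/error/final format shown above; `readWorkflowStream` and `onChunk` are illustrative names:

```ts
// Sketch: read an SSE workflow stream and forward text chunks per block.
async function readWorkflowStream(
  res: Response,
  onChunk: (blockId: string, text: string) => void
): Promise<void> {
  const reader = res.body!.getReader()
  const decoder = new TextDecoder()
  let buffer = ''

  while (true) {
    const { done, value } = await reader.read()
    if (done) break
    buffer += decoder.decode(value, { stream: true })

    const lines = buffer.split('\n')
    buffer = lines.pop() ?? '' // keep a possibly partial line for the next read

    for (const line of lines) {
      if (!line.startsWith('data: ')) continue
      const data = line.substring(6)
      if (data === '[DONE]') continue // end-of-stream terminator

      const json = JSON.parse(data)
      if (json.event === 'error') throw new Error(json.error)
      if (json.event === 'final' || json.event === 'done') continue // execution metadata
      if (json.blockId && json.chunk) onChunk(json.blockId, json.chunk)
    }
  }
}
```
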
diff --git a/apps/sim/app/workspace/[workspaceId]/logs/components/frozen-canvas/frozen-canvas-modal.tsx b/apps/sim/app/workspace/[workspaceId]/logs/components/frozen-canvas/frozen-canvas-modal.tsx
index 4a22b5f6f..af039420c 100644
--- a/apps/sim/app/workspace/[workspaceId]/logs/components/frozen-canvas/frozen-canvas-modal.tsx
+++ b/apps/sim/app/workspace/[workspaceId]/logs/components/frozen-canvas/frozen-canvas-modal.tsx
@@ -1,7 +1,7 @@
'use client'
import { useState } from 'react'
-import { Eye, Maximize2, Minimize2, X } from 'lucide-react'
+import { Maximize2, Minimize2, X } from 'lucide-react'
import { Badge } from '@/components/ui/badge'
import { Button } from '@/components/ui/button'
import { Dialog, DialogContent, DialogHeader, DialogTitle } from '@/components/ui/dialog'
@@ -45,7 +45,6 @@ export function FrozenCanvasModal({
{/* Header */}
-
Logged Workflow State
@@ -83,14 +82,15 @@ export function FrozenCanvasModal({
traceSpans={traceSpans}
height='100%'
width='100%'
+ // Ensure preview leaves padding at edges so nodes don't touch header
/>
{/* Footer with instructions */}
- 💡 Click on blocks to see their input and output data at execution time. This canvas
- shows the exact state of the workflow when this execution was captured.
+ Click on blocks to see their input and output data at execution time. This canvas shows
+ the exact state of the workflow when this execution was captured.
diff --git a/apps/sim/app/workspace/[workspaceId]/logs/components/frozen-canvas/frozen-canvas.tsx b/apps/sim/app/workspace/[workspaceId]/logs/components/frozen-canvas/frozen-canvas.tsx
index 9adb54cdf..897e956c1 100644
--- a/apps/sim/app/workspace/[workspaceId]/logs/components/frozen-canvas/frozen-canvas.tsx
+++ b/apps/sim/app/workspace/[workspaceId]/logs/components/frozen-canvas/frozen-canvas.tsx
@@ -582,6 +582,8 @@ export function FrozenCanvas({
workflowState={data.workflowState}
showSubBlocks={true}
isPannable={true}
+ defaultZoom={0.8}
+ fitPadding={0.25}
onNodeClick={(blockId) => {
// Always allow clicking blocks, even if they don't have execution data
// This is important for failed workflows where some blocks never executed
diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/control-bar/components/deploy-modal/components/deployment-info/components/example-command/example-command.tsx b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/control-bar/components/deploy-modal/components/deployment-info/components/example-command/example-command.tsx
index e233fada3..496fd0b9e 100644
--- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/control-bar/components/deploy-modal/components/deployment-info/components/example-command/example-command.tsx
+++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/control-bar/components/deploy-modal/components/deployment-info/components/example-command/example-command.tsx
@@ -12,16 +12,20 @@ import {
} from '@/components/ui/dropdown-menu'
import { Label } from '@/components/ui/label'
import { getEnv, isTruthy } from '@/lib/env'
+import { OutputSelect } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/chat/components/output-select/output-select'
interface ExampleCommandProps {
command: string
apiKey: string
endpoint: string
showLabel?: boolean
- getInputFormatExample?: () => string
+ getInputFormatExample?: (includeStreaming?: boolean) => string
+ workflowId: string | null
+ selectedStreamingOutputs: string[]
+ onSelectedStreamingOutputsChange: (outputs: string[]) => void
}
-type ExampleMode = 'sync' | 'async'
+type ExampleMode = 'sync' | 'async' | 'stream'
type ExampleType = 'execute' | 'status' | 'rate-limits'
export function ExampleCommand({
@@ -30,6 +34,9 @@ export function ExampleCommand({
endpoint,
showLabel = true,
getInputFormatExample,
+ workflowId,
+ selectedStreamingOutputs,
+ onSelectedStreamingOutputsChange,
}: ExampleCommandProps) {
  const [mode, setMode] = useState<ExampleMode>('sync')
  const [exampleType, setExampleType] = useState<ExampleType>('execute')
@@ -63,11 +70,30 @@ export function ExampleCommand({
const getDisplayCommand = () => {
const baseEndpoint = endpoint.replace(apiKey, '$SIM_API_KEY')
const inputExample = getInputFormatExample
- ? getInputFormatExample()
+ ? getInputFormatExample(false) // No streaming for sync/async modes
: ' -d \'{"input": "your data here"}\''
switch (mode) {
case 'sync':
+ // For sync mode, use basic example without streaming
+ if (getInputFormatExample) {
+ const syncInputExample = getInputFormatExample(false)
+ return `curl -X POST \\
+ -H "X-API-Key: $SIM_API_KEY" \\
+ -H "Content-Type: application/json"${syncInputExample} \\
+ ${baseEndpoint}`
+ }
+ return formatCurlCommand(command, apiKey)
+
+ case 'stream':
+ // For stream mode, include streaming params
+ if (getInputFormatExample) {
+ const streamInputExample = getInputFormatExample(true)
+ return `curl -X POST \\
+ -H "X-API-Key: $SIM_API_KEY" \\
+ -H "Content-Type: application/json"${streamInputExample} \\
+ ${baseEndpoint}`
+ }
return formatCurlCommand(command, apiKey)
case 'async':
@@ -114,10 +140,11 @@ export function ExampleCommand({
}
return (
-